/*
 * drivers/base/dd.c - The core device/driver interactions.
 *
 * This file contains the (sometimes tricky) code that controls the
 * interactions between devices and drivers, which primarily includes
 * driver binding and unbinding.
 *
 * All of this code used to exist in drivers/base/bus.c, but was
 * relocated to here in the name of compartmentalization (since it wasn't
 * strictly code just for the 'struct bus_type').
 *
 * Copyright (c) 2002-5 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007-2009 Novell Inc.
 *
 * This file is released under the GPLv2
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>

#include "base.h"
#include "power/power.h"

/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information, which means some drivers will be probed before a
 * resource they depend on is available.  For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized.  If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe
 * hook.
 *
 * Deferred probe maintains two lists of devices, a pending list and an active
 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
 * pending list.  A successful driver probe will trigger moving all devices
 * from the pending to the active list so that the workqueue will eventually
 * retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * or the (struct device*)->p->deferred_probe pointers are manipulated.
 */
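
/*
 * Illustrative sketch (not part of this file's logic): a driver that depends
 * on a resource which may not be ready yet simply propagates the error from
 * its resource getter; the names below are hypothetical, and PTR_ERR() may
 * well be -EPROBE_DEFER, which lands the device on the pending list below.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gpio_desc *reset;
 *
 *		reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
 *		if (IS_ERR(reset))
 *			return PTR_ERR(reset);
 *		return 0;
 *	}
 */
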
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);

/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to get freed by another
	 * thread and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list.
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_lock();
		device_pm_move_last(dev);
		device_pm_unlock();

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);

		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);

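/*
 * driver_deferred_probe_add() - Add @dev to the deferred probe pending list,
 * unless it is already on one of the deferred lists.  Called when a probe
 * returns -EPROBE_DEFER; protected by deferred_probe_mutex.
 */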
static void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Added to deferred list\n");
		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
	}
	mutex_unlock(&deferred_probe_mutex);
}

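/*
 * driver_deferred_probe_del() - Take @dev off whichever deferred probe list
 * it is currently on, if any.
 */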
void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (!list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Removed from deferred list\n");
		list_del_init(&dev->p->deferred_probe);
	}
	mutex_unlock(&deferred_probe_mutex);
}

static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This function moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the second
 * depends on the first, then it will get put on the pending list after the
 * trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger count
 * changes in the midst of a probe, then deferred processing should be triggered
 * again.
 */
static void driver_deferred_probe_trigger(void)
{
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	queue_work(deferred_wq, &deferred_probe_work);
}

/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	deferred_wq = create_singlethread_workqueue("deferwq");
	if (WARN_ON(!deferred_wq))
		return -ENOMEM;

	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_workqueue(deferred_wq);
	return 0;
}
late_initcall(deferred_probe_initcall);

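/*
 * driver_bound() - Record that @dev is now bound to dev->driver: add it to
 * the driver's klist of devices, drop it from the deferred probe lists,
 * retrigger deferred probing (the new binding may satisfy another device's
 * dependency) and send the BUS_NOTIFY_BOUND_DRIVER notification.
 */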
static void driver_bound(struct device *dev)
{
	if (klist_node_attached(&dev->p->knode_driver)) {
		printk(KERN_WARNING "%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);
}

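/*
 * driver_sysfs_add() - Send the BUS_NOTIFY_BIND_DRIVER notification and
 * create the mutual sysfs links between the driver and the device, undoing
 * the first link if the second one cannot be created.
 */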
static int driver_sysfs_add(struct device *dev)
{
	int ret;

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
			  kobject_name(&dev->kobj));
	if (ret == 0) {
		ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
					"driver");
		if (ret)
			sysfs_remove_link(&dev->driver->p->kobj,
					kobject_name(&dev->kobj));
	}
	return ret;
}

static void driver_sysfs_remove(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv) {
		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
		sysfs_remove_link(&dev->kobj, "driver");
	}
}

/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count
 * nor take the bus's rwsem. Please verify those are accounted
 * for before calling this. (It is ok to call with no other effort
 * from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 */
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);

static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

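/*
 * really_probe() - Call the bus's (or, failing that, the driver's) ->probe()
 * for @dev after binding pins, sysfs links and the PM domain.  Returns 1 if
 * the device was bound and 0 otherwise; -EPROBE_DEFER puts the device on the
 * deferred list, while other probe errors are logged and then swallowed so
 * that the next driver can be tried.
 */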
static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = 0;
	int local_trigger_count = atomic_read(&deferred_trigger_count);

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	WARN_ON(!list_empty(&dev->devres_head));

	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto probe_failed;

	if (driver_sysfs_add(dev)) {
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		/* Did a trigger occur while probing? Need to re-trigger if yes */
		if (local_trigger_count != atomic_read(&deferred_trigger_count))
			driver_deferred_probe_trigger();
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up(&probe_waitqueue);
	return ret;
}

/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
	pr_debug("%s: probe_count = %d\n", __func__,
		 atomic_read(&probe_count));
	if (atomic_read(&probe_count))
		return -EBUSY;
	return 0;
}

/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);

/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	pm_runtime_barrier(dev);
	ret = really_probe(dev, drv);
	pm_request_idle(dev);

	return ret;
}

bool driver_allows_async_probing(struct device_driver *drv)
{
	return drv->probe_type == PROBE_PREFER_ASYNCHRONOUS;
}
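
/*
 * Illustrative sketch (not part of this file): a driver opts in to
 * asynchronous probing by setting probe_type in its struct device_driver;
 * the driver and callback names below are hypothetical.
 *
 *	static struct platform_driver foo_driver = {
 *		.probe	= foo_probe,
 *		.driver	= {
 *			.name		= "foo",
 *			.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
 *		},
 *	};
 */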

struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute two passes
	 * over the drivers: the first pass does synchronous probing and the
	 * second does asynchronous probing (if synchronous probing did not
	 * succeed - most likely because there was no driver requiring
	 * synchronous probing - and we found an asynchronous driver during
	 * the first pass).  Two passes are needed because we can't kick off
	 * an asynchronous probe for a given device and driver from within
	 * bus_for_each_drv(), since the driver pointer is not guaranteed to
	 * stay valid once bus_for_each_drv() iterates to the next driver on
	 * the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to 'true' if, while scanning for a matching
	 * driver, we encounter one that requests asynchronous probing.
	 */
	bool have_async;
};

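/*
 * __device_attach_driver() - Callback for bus_for_each_drv(): skip the
 * driver if the device is already claimed or does not match, honour the
 * synchronous/asynchronous pass described above, and otherwise try to
 * probe the device with this driver.
 */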
static int __device_attach_driver(struct device_driver *drv, void *_data)
{
	struct device_attach_data *data = _data;
	struct device *dev = data->dev;
	bool async_allowed;

	/*
	 * Check if the device has already been claimed. This may happen when
	 * driver loading, device discovery/registration, and deferred probe
	 * processing all happen at once with multiple threads.
	 */
	if (dev->driver)
		return -EBUSY;

	if (!driver_match_device(drv, dev))
		return 0;

	async_allowed = driver_allows_async_probing(drv);

	if (async_allowed)
		data->have_async = true;

	if (data->check_async && async_allowed != data->want_async)
		return 0;

	return driver_probe_device(drv, dev);
}

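/*
 * __device_attach_async_helper() - Asynchronous half of __device_attach():
 * runs from the async domain, walks the bus's drivers looking only at the
 * ones that allow async probing, and drops the device reference taken when
 * the probe was scheduled.
 */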
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev		= dev,
		.check_async	= true,
		.want_async	= true,
	};

	device_lock(dev);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	device_unlock(dev);

	put_device(dev);
}

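/*
 * __device_attach() - Try to bind @dev to a driver.  If dev->driver is
 * already set, just complete the binding via device_bind_driver();
 * otherwise walk the bus's drivers synchronously and, when @allow_async is
 * set and an async-capable driver was seen, schedule an asynchronous retry.
 */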
int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		if (klist_node_attached(&dev->p->knode_driver)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		ret = bus_for_each_drv(dev->bus, NULL, &data,
					__device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * try them.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			get_device(dev);
			async_schedule(__device_attach_async_helper, dev);
		} else {
			pm_request_idle(dev);
		}
	}
out_unlock:
	device_unlock(dev);
	return ret;
}

/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);

void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}

static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;

	/*
	 * Lock the device and try to bind it to the driver. We drop the
	 * error here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error simply
	 * because they do not support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	if (!driver_match_device(drv, dev))
		return 0;

	if (dev->parent)	/* Needed for USB */
		device_lock(dev->parent);
	device_lock(dev);
	if (!dev->driver)
		driver_probe_device(drv, dev);
	device_unlock(dev);
	if (dev->parent)
		device_unlock(dev->parent);

	return 0;
}

/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);

/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		if (driver_allows_async_probing(drv))
			async_synchronize_full();

		pm_runtime_get_sync(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);
		devres_release_all(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);

		klist_remove(&dev->p->knode_driver);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

	}
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_lock(dev);
	__device_release_driver(dev);
	device_unlock(dev);
}
EXPORT_SYMBOL_GPL(device_release_driver);

/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);

		if (dev->parent)	/* Needed for USB */
			device_lock(dev->parent);
		device_lock(dev);
		if (dev->driver == drv)
			__device_release_driver(dev);
		device_unlock(dev);
		if (dev->parent)
			device_unlock(dev->parent);
		put_device(dev);
	}
}