/*
 * drivers/base/dd.c - The core device/driver interactions.
 *
 * This file contains the (sometimes tricky) code that controls the
 * interactions between devices and drivers, which primarily includes
 * driver binding and unbinding.
 *
 * All of this code used to exist in drivers/base/bus.c, but was
 * relocated here in the name of compartmentalization (since it wasn't
 * strictly code just for the 'struct bus_type').
 *
 * Copyright (c) 2002-5 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007-2009 Novell Inc.
 *
 * This file is released under the GPLv2
 */

#include <linux/device.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/devinfo.h>

#include "base.h"
#include "power/power.h"

/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information, which means some drivers will get probed before a
 * resource they depend on is available.  For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized.  If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe
 * hook.
 *
 * Deferred probe maintains two lists of devices, a pending list and an active
 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
 * pending list.  A successful driver probe will trigger moving all devices
 * from the pending to the active list so that the workqueue will eventually
 * retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * or the (struct device*)->p->deferred_probe pointers are manipulated
 */
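/*
 * Illustrative sketch only (foo_probe() and its "reset" GPIO are hypothetical,
 * not part of this file): a consumer driver typically lands on the pending
 * list above by propagating -EPROBE_DEFER from its probe hook when a required
 * resource is not ready yet:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gpio_desc *reset;
 *
 *		reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW);
 *		if (IS_ERR(reset))
 *			return PTR_ERR(reset);
 *		return 0;
 *	}
 *
 * where PTR_ERR() may well be -EPROBE_DEFER until the GPIO provider has
 * probed, at which point driver_deferred_probe_trigger() below retries foo.
 */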
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);

static ssize_t deferred_probe_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	bool value;

	mutex_lock(&deferred_probe_mutex);
	value = !list_empty(&dev->p->deferred_probe);
	mutex_unlock(&deferred_probe_mutex);

	return sprintf(buf, "%d\n", value);
}
DEVICE_ATTR_RO(deferred_probe);

/*
 * In some cases, like suspend to RAM or hibernation, it might be reasonable
 * to prohibit probing of devices as it could be unsafe.
 * Once defer_all_probes is true all driver probes will be forcibly deferred.
 */
static bool defer_all_probes;

/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;

	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to get freed by another
	 * thread and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_lock();
		device_pm_move_last(dev);
		device_pm_unlock();

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);

		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);

static void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Added to deferred list\n");
		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
	}
	mutex_unlock(&deferred_probe_mutex);
}

void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
	if (!list_empty(&dev->p->deferred_probe)) {
		dev_dbg(dev, "Removed from deferred list\n");
		list_del_init(&dev->p->deferred_probe);
	}
	mutex_unlock(&deferred_probe_mutex);
}

static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This function moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the
 * second depends on the first, then it will get put on the pending list after
 * the trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger count
 * changes in the midst of a probe, then deferred processing should be
 * triggered again.
 */
static void driver_deferred_probe_trigger(void)
{
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	schedule_work(&deferred_probe_work);
}

/**
 * device_block_probing() - Block/defer device probes
 *
 *	It will disable probing of devices and defer their probes instead.
 */
void device_block_probing(void)
{
	defer_all_probes = true;
	/* sync with probes to avoid races. */
	wait_for_device_probe();
}

/**
 * device_unblock_probing() - Unblock/enable device probes
 *
 *	It will restore normal behavior and trigger re-probing of deferred
 * devices.
 */
void device_unblock_probing(void)
{
	defer_all_probes = false;
	driver_deferred_probe_trigger();
}
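/*
 * Rough usage sketch (the actual call sites live in the PM core, not in this
 * file): system-wide suspend is expected to bracket its no-probe window like
 * this, so that probes deferred while suspending are retried after resume:
 *
 *	device_block_probing();
 *	... suspend devices and enter the sleep state, then resume ...
 *	device_unblock_probing();
 */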

/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_work(&deferred_probe_work);
	return 0;
}
late_initcall(deferred_probe_initcall);

/**
 * device_is_bound() - Check if device is bound to a driver
 * @dev: device to check
 *
 * Returns true if passed device has already finished probing successfully
 * against a driver.
 *
 * This function must be called with the device lock held.
 */
bool device_is_bound(struct device *dev)
{
	return dev->p && klist_node_attached(&dev->p->knode_driver);
}
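/*
 * Calling-convention sketch for device_is_bound() (hypothetical caller shown
 * only to illustrate the locking rule above):
 *
 *	device_lock(dev);
 *	bound = device_is_bound(dev);
 *	device_unlock(dev);
 */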

static void driver_bound(struct device *dev)
{
	if (device_is_bound(dev)) {
		printk(KERN_WARNING "%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
	device_links_driver_bound(dev);

	device_pm_check_callbacks(dev);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);
}

static int driver_sysfs_add(struct device *dev)
{
	int ret;

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
			  kobject_name(&dev->kobj));
	if (ret == 0) {
		ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
					"driver");
		if (ret)
			sysfs_remove_link(&dev->driver->p->kobj,
					kobject_name(&dev->kobj));
	}
	return ret;
}

static void driver_sysfs_remove(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv) {
		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
		sysfs_remove_link(&dev->kobj, "driver");
	}
}

/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count
 * nor take the bus's rwsem. Please verify those are accounted
 * for before calling this. (It is ok to call with no other effort
 * from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 */
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	else if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
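
/*
 * Illustrative sketch (hypothetical caller): per the comment above, manual
 * binding is expected to look roughly like this, with the device lock held
 * and dev->driver pointed at the chosen driver beforehand:
 *
 *	device_lock(dev);
 *	dev->driver = drv;
 *	if (device_bind_driver(dev))
 *		dev->driver = NULL;
 *	device_unlock(dev);
 */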

static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = -EPROBE_DEFER;
	int local_trigger_count = atomic_read(&deferred_trigger_count);
	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE);

	if (defer_all_probes) {
		/*
		 * Value of defer_all_probes can be set only by
		 * device_block_probing() which, in turn, will call
		 * wait_for_device_probe() right after that to avoid any races.
		 */
		dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		return ret;
	}

	ret = device_links_check_suppliers(dev);
	if (ret)
		return ret;

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	WARN_ON(!list_empty(&dev->devres_head));

re_probe:
	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto pinctrl_bind_failed;

	if (driver_sysfs_add(dev)) {
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	/*
	 * Ensure devices are listed in devices_kset in the correct order.
	 * It's important to move the device to the end of devices_kset
	 * before calling .probe, because probing could be recursive and
	 * the parent device should always go first.
	 */
	devices_kset_move_last(dev);

	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	if (test_remove) {
		test_remove = false;

		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		devres_release_all(dev);
		driver_sysfs_remove(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);

		goto re_probe;
	}

	pinctrl_init_done(dev);

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
	device_links_no_driver(dev);
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);
	pm_runtime_reinit(dev);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		/* Did a trigger occur while probing? Need to re-trigger if yes */
		if (local_trigger_count != atomic_read(&deferred_trigger_count))
			driver_deferred_probe_trigger();
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up(&probe_waitqueue);
	return ret;
}

/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
	pr_debug("%s: probe_count = %d\n", __func__,
		 atomic_read(&probe_count));
	if (atomic_read(&probe_count))
		return -EBUSY;
	return 0;
}

/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
	/* wait for the deferred probe workqueue to finish */
	flush_work(&deferred_probe_work);

	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);

/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, runtime-resume the parent before driver probing.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	pm_runtime_get_suppliers(dev);
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	pm_runtime_barrier(dev);
	ret = really_probe(dev, drv);
	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	pm_runtime_put_suppliers(dev);
	return ret;
}

bool driver_allows_async_probing(struct device_driver *drv)
{
	switch (drv->probe_type) {
	case PROBE_PREFER_ASYNCHRONOUS:
		return true;

	case PROBE_FORCE_SYNCHRONOUS:
		return false;

	default:
		if (module_requested_async_probing(drv->owner))
			return true;

		return false;
	}
}
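
/*
 * Sketch of how a driver opts in (hypothetical foo_driver, for illustration
 * only): asynchronous probing is requested through the probe_type field of
 * struct device_driver, e.g. for a platform driver:
 *
 *	static struct platform_driver foo_driver = {
 *		.probe = foo_probe,
 *		.driver = {
 *			.name = "foo",
 *			.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 *		},
 *	};
 */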

struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute 2 passes
	 * over drivers: first pass doing synchronous probing and second
	 * doing asynchronous probing (if synchronous did not succeed -
	 * most likely because there was no driver requiring synchronous
	 * probing - and we found an asynchronous driver during the first pass).
	 * The 2 passes are done because we can't shoot an asynchronous
	 * probe for a given device and driver from bus_for_each_drv() since
	 * the driver pointer is not guaranteed to stay valid once
	 * bus_for_each_drv() iterates to the next driver on the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to 'true' if, while scanning for matching
	 * driver, we'll encounter one that requests asynchronous probing.
	 */
	bool have_async;
};

static int __device_attach_driver(struct device_driver *drv, void *_data)
{
	struct device_attach_data *data = _data;
	struct device *dev = data->dev;
	bool async_allowed;
	int ret;

	/*
	 * Check if device has already been claimed. This may happen when
	 * driver loading, device discovery/registration, and deferred probe
	 * processing all happen at once with multiple threads.
	 */
	if (dev->driver)
		return -EBUSY;

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
		return ret;
	} /* ret > 0 means positive match */

	async_allowed = driver_allows_async_probing(drv);

	if (async_allowed)
		data->have_async = true;

	if (data->check_async && async_allowed != data->want_async)
		return 0;

	return driver_probe_device(drv, dev);
}

static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev		= dev,
		.check_async	= true,
		.want_async	= true,
	};

	device_lock(dev);

	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	device_unlock(dev);

	put_device(dev);
}

static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		if (device_is_bound(dev)) {
			ret = 1;
			goto out_unlock;
		}
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
					__device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find an appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * try them.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			get_device(dev);
			async_schedule(__device_attach_async_helper, dev);
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	return ret;
}

/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);

void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}

static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;
	int ret;

	/*
	 * Lock device and try to bind to it. We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if they don't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d\n", ret);
		return ret;
	} /* ret > 0 means positive match */

	if (dev->parent)	/* Needed for USB */
		device_lock(dev->parent);
	device_lock(dev);
	if (!dev->driver)
		driver_probe_device(drv, dev);
	device_unlock(dev);
	if (dev->parent)
		device_unlock(dev->parent);

	return 0;
}

/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);

/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev, struct device *parent)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		if (driver_allows_async_probing(drv))
			async_synchronize_full();

		while (device_links_busy(dev)) {
			device_unlock(dev);
			if (parent)
				device_unlock(parent);

			device_links_unbind_consumers(dev);
			if (parent)
				device_lock(parent);

			device_lock(dev);
			/*
			 * A concurrent invocation of the same function might
			 * have released the driver successfully while this one
			 * was waiting, so check for that.
			 */
			if (dev->driver != drv)
				return;
		}

		pm_runtime_get_sync(dev);
		pm_runtime_clean_up_links(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		device_links_driver_cleanup(dev);
		devres_release_all(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);

		klist_remove(&dev->p->knode_driver);
		device_pm_check_callbacks(dev);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);
	}
}

void device_release_driver_internal(struct device *dev,
				    struct device_driver *drv,
				    struct device *parent)
{
	if (parent)
		device_lock(parent);

	device_lock(dev);
	if (!drv || drv == dev->driver)
		__device_release_driver(dev, parent);

	device_unlock(dev);
	if (parent)
		device_unlock(parent);
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 *
 * If this function is to be called with @dev->parent lock held, ensure that
 * the device's consumers are unbound in advance or that their locks can be
 * acquired under the @dev->parent lock.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
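
/*
 * Illustrative sketch (hypothetical caller, e.g. bus code servicing a manual
 * unbind followed later by a rebind attempt):
 *
 *	device_release_driver(dev);
 *	...
 *	ret = device_attach(dev);
 *
 * where device_attach() returns 1 if a driver was bound, 0 if no matching
 * driver was found and -ENODEV if the device is not registered.
 */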

/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);
		device_release_driver_internal(dev, drv, dev->parent);
		put_device(dev);
	}
}