// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/dd.c - The core device/driver interactions.
 *
 * This file contains the (sometimes tricky) code that controls the
 * interactions between devices and drivers, which primarily includes
 * driver binding and unbinding.
 *
 * All of this code used to exist in drivers/base/bus.c, but was
 * relocated to here in the name of compartmentalization (since it wasn't
 * strictly code just for the 'struct bus_type').
 *
 * Copyright (c) 2002-5 Patrick Mochel
 * Copyright (c) 2002-3 Open Source Development Labs
 * Copyright (c) 2007-2009 Greg Kroah-Hartman <gregkh@suse.de>
 * Copyright (c) 2007-2009 Novell Inc.
 */

19
#include <linux/debugfs.h>
20
#include <linux/device.h>
21
#include <linux/delay.h>
22
#include <linux/dma-mapping.h>
23
#include <linux/init.h>
24
#include <linux/module.h>
25
#include <linux/kthread.h>
26
#include <linux/wait.h>
27
#include <linux/async.h>
28
#include <linux/pm_runtime.h>
29
#include <linux/pinctrl/devinfo.h>
30 31 32 33

#include "base.h"
#include "power/power.h"

/*
 * Deferred Probe infrastructure.
 *
 * Sometimes driver probe order matters, but the kernel doesn't always have
 * dependency information which means some drivers will get probed before a
 * resource it depends on is available.  For example, an SDHCI driver may
 * first need a GPIO line from an i2c GPIO controller before it can be
 * initialized.  If a required resource is not available yet, a driver can
 * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
 *
 * Deferred probe maintains two lists of devices, a pending list and an active
 * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
 * pending list.  A successful driver probe will trigger moving all devices
 * from the pending to the active list so that the workqueue will eventually
 * retry them.
 *
 * The deferred_probe_mutex must be held any time the deferred_probe_*_list
 * or the (struct device*)->p->deferred_probe pointers are manipulated
 */
static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
/* Bumped on every trigger so racing probes can detect a missed trigger */
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/* debugfs "devices_deferred" file, created by deferred_probe_initcall() */
static struct dentry *deferred_devices;
/* Set once deferred_probe_initcall() (late_initcall) has completed */
static bool initcalls_done;

/*
 * In some cases, like suspend to RAM or hibernation, it might be reasonable
 * to prohibit probing of devices as it could be unsafe.
 * Once defer_all_probes is true all driver probes will be forcibly deferred.
 */
static bool defer_all_probes;

/*
 * deferred_probe_work_func() - Retry probing devices in the active list.
 */
static void deferred_probe_work_func(struct work_struct *work)
{
	struct device *dev;
	struct device_private *private;
	/*
	 * This block processes every device in the deferred 'active' list.
	 * Each device is removed from the active list and passed to
	 * bus_probe_device() to re-attempt the probe.  The loop continues
	 * until every device in the active list is removed and retried.
	 *
	 * Note: Once the device is removed from the list and the mutex is
	 * released, it is possible for the device to get freed by another
	 * thread and cause an illegal pointer dereference.  This code uses
	 * get/put_device() to ensure the device structure cannot disappear
	 * from under our feet.
	 */
	mutex_lock(&deferred_probe_mutex);
	while (!list_empty(&deferred_probe_active_list)) {
		private = list_first_entry(&deferred_probe_active_list,
					typeof(*dev->p), deferred_probe);
		dev = private->device;
		/* list_del_init() so the device can be re-queued later */
		list_del_init(&private->deferred_probe);

		get_device(dev);

		/*
		 * Drop the mutex while probing each device; the probe path may
		 * manipulate the deferred list
		 */
		mutex_unlock(&deferred_probe_mutex);

		/*
		 * Force the device to the end of the dpm_list since
		 * the PM code assumes that the order we add things to
		 * the list is a good order for suspend but deferred
		 * probe makes that very unsafe.
		 */
		device_pm_move_to_tail(dev);

		dev_dbg(dev, "Retrying from deferred list\n");
		bus_probe_device(dev);
		mutex_lock(&deferred_probe_mutex);

		put_device(dev);
	}
	mutex_unlock(&deferred_probe_mutex);
}
static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);

static void driver_deferred_probe_add(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
122
	if (list_empty(&dev->p->deferred_probe)) {
123
		dev_dbg(dev, "Added to deferred list\n");
124
		list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
125 126 127 128 129 130 131
	}
	mutex_unlock(&deferred_probe_mutex);
}

void driver_deferred_probe_del(struct device *dev)
{
	mutex_lock(&deferred_probe_mutex);
132
	if (!list_empty(&dev->p->deferred_probe)) {
133
		dev_dbg(dev, "Removed from deferred list\n");
134
		list_del_init(&dev->p->deferred_probe);
135 136 137 138 139 140 141 142 143 144 145
	}
	mutex_unlock(&deferred_probe_mutex);
}

/* Re-probing stays disabled until deferred_probe_initcall() enables it */
static bool driver_deferred_probe_enable = false;
/**
 * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
 *
 * This function moves all devices from the pending list to the active
 * list and schedules the deferred probe workqueue to process them.  It
 * should be called anytime a driver is successfully bound to a device.
 *
 * Note, there is a race condition in multi-threaded probe. In the case where
 * more than one device is probing at the same time, it is possible for one
 * probe to complete successfully while another is about to defer. If the second
 * depends on the first, then it will get put on the pending list after the
 * trigger event has already occurred and will be stuck there.
 *
 * The atomic 'deferred_trigger_count' is used to determine if a successful
 * trigger has occurred in the midst of probing a driver. If the trigger count
 * changes in the midst of a probe, then deferred processing should be triggered
 * again.
 */
static void driver_deferred_probe_trigger(void)
{
	if (!driver_deferred_probe_enable)
		return;

	/*
	 * A successful probe means that all the devices in the pending list
	 * should be triggered to be reprobed.  Move all the deferred devices
	 * into the active list so they can be retried by the workqueue
	 */
	mutex_lock(&deferred_probe_mutex);
	atomic_inc(&deferred_trigger_count);
	list_splice_tail_init(&deferred_probe_pending_list,
			      &deferred_probe_active_list);
	mutex_unlock(&deferred_probe_mutex);

	/*
	 * Kick the re-probe thread.  It may already be scheduled, but it is
	 * safe to kick it again.
	 */
	schedule_work(&deferred_probe_work);
}

/**
 * device_block_probing() - Block/defer device's probes
 *
 *	It will disable probing of devices and defer their probes instead.
 */
void device_block_probing(void)
{
	defer_all_probes = true;
	/* sync with probes to avoid races. */
	wait_for_device_probe();
}

/**
 * device_unblock_probing() - Unblock/enable device's probes
 *
 *	It will restore normal behavior and trigger re-probing of deferred
 * devices.
 */
void device_unblock_probing(void)
{
	defer_all_probes = false;
	/* retry anything that was forcibly deferred while probing was blocked */
	driver_deferred_probe_trigger();
}

/*
 * deferred_devs_show() - Show the devices in the deferred probe pending list.
 */
static int deferred_devs_show(struct seq_file *s, void *data)
{
	struct device_private *curr;

	mutex_lock(&deferred_probe_mutex);

	/* one device name per line, printed under the list mutex */
	list_for_each_entry(curr, &deferred_probe_pending_list, deferred_probe)
		seq_printf(s, "%s\n", dev_name(curr->device));

	mutex_unlock(&deferred_probe_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(deferred_devs);

/* -1 = no timeout; "deferred_probe_timeout=<seconds>" on the command line */
static int deferred_probe_timeout = -1;
static int __init deferred_probe_timeout_setup(char *str)
{
	deferred_probe_timeout = simple_strtol(str, NULL, 10);
	return 1;
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);

/**
 * driver_deferred_probe_check_state() - Check deferred probe state
 * @dev: device to check
 *
 * Returns -ENODEV if init is done and all built-in drivers have had a chance
 * to probe (i.e. initcalls are done), -ETIMEDOUT if deferred probe debug
 * timeout has expired, or -EPROBE_DEFER if none of those conditions are met.
 *
 * Drivers or subsystems can opt-in to calling this function instead of directly
 * returning -EPROBE_DEFER.
 */
int driver_deferred_probe_check_state(struct device *dev)
{
	/* Keep deferring normally while initcalls are still running. */
	if (!initcalls_done)
		return -EPROBE_DEFER;

	/* Debug timeout expired: the dependency is not going to show up. */
	if (!deferred_probe_timeout) {
		dev_WARN(dev, "deferred probe timeout, ignoring dependency");
		return -ETIMEDOUT;
	}

	dev_warn(dev, "ignoring dependency for device, assuming no driver");
	return -ENODEV;
}

static void deferred_probe_timeout_work_func(struct work_struct *work)
{
	struct device_private *private, *p;

	deferred_probe_timeout = 0;
	driver_deferred_probe_trigger();
	flush_work(&deferred_probe_work);

	list_for_each_entry_safe(private, p, &deferred_probe_pending_list, deferred_probe)
		dev_info(private->device, "deferred probe pending");
}
static DECLARE_DELAYED_WORK(deferred_probe_timeout_work, deferred_probe_timeout_work_func);

/**
 * deferred_probe_initcall() - Enable probing of deferred devices
 *
 * We don't want to get in the way when the bulk of drivers are getting probed.
 * Instead, this initcall makes sure that deferred probing is delayed until
 * late_initcall time.
 */
static int deferred_probe_initcall(void)
{
	deferred_devices = debugfs_create_file("devices_deferred", 0444, NULL,
					       NULL, &deferred_devs_fops);

	driver_deferred_probe_enable = true;
	driver_deferred_probe_trigger();
	/* Sort as many dependencies as possible before exiting initcalls */
	flush_work(&deferred_probe_work);
	initcalls_done = true;

	/*
	 * Trigger deferred probe again, this time we won't defer anything
	 * that is optional
	 */
	driver_deferred_probe_trigger();
	flush_work(&deferred_probe_work);

	/* Arm the debug timeout if one was requested on the command line */
	if (deferred_probe_timeout > 0) {
		schedule_delayed_work(&deferred_probe_timeout_work,
			deferred_probe_timeout * HZ);
	}
	return 0;
}
late_initcall(deferred_probe_initcall);

/* Tear down the debugfs "devices_deferred" file on driver core exit. */
static void __exit deferred_probe_exit(void)
{
	debugfs_remove_recursive(deferred_devices);
}
__exitcall(deferred_probe_exit);

/**
 * device_is_bound() - Check if device is bound to a driver
 * @dev: device to check
 *
 * Returns true if passed device has already finished probing successfully
 * against a driver.
 *
 * This function must be called with the device lock held.
 */
bool device_is_bound(struct device *dev)
{
	/* knode_driver is attached by driver_bound() after a successful probe */
	return dev->p && klist_node_attached(&dev->p->knode_driver);
}

/*
 * Record a successful binding of dev->driver to @dev: link the device into
 * the driver's klist, notify device links and bus listeners, and kick off
 * retrying any probes that were deferred.
 */
static void driver_bound(struct device *dev)
{
	if (device_is_bound(dev)) {
		printk(KERN_WARNING "%s: device %s already bound\n",
			__func__, kobject_name(&dev->kobj));
		return;
	}

	pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
		 __func__, dev_name(dev));

	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
	device_links_driver_bound(dev);

	device_pm_check_callbacks(dev);

	/*
	 * Make sure the device is no longer in one of the deferred lists and
	 * kick off retrying all pending devices
	 */
	driver_deferred_probe_del(dev);
	driver_deferred_probe_trigger();

	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BOUND_DRIVER, dev);

	kobject_uevent(&dev->kobj, KOBJ_BIND);
}

/*
 * sysfs "coredump" attribute: any write asks the bound driver for a coredump.
 * NOTE(review): the attribute is created/removed alongside the binding (see
 * driver_sysfs_add()/driver_sysfs_remove()), which is presumably what makes
 * the unchecked dev->driver->coredump dereference safe under device_lock —
 * confirm against the attribute lifetime.
 */
static ssize_t coredump_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	device_lock(dev);
	dev->driver->coredump(dev);
	device_unlock(dev);

	return count;
}
static DEVICE_ATTR_WO(coredump);

362 363 364 365
static int driver_sysfs_add(struct device *dev)
{
	int ret;

366 367 368 369
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_BIND_DRIVER, dev);

370
	ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387
				kobject_name(&dev->kobj));
	if (ret)
		goto fail;

	ret = sysfs_create_link(&dev->kobj, &dev->driver->p->kobj,
				"driver");
	if (ret)
		goto rm_dev;

	if (!IS_ENABLED(CONFIG_DEV_COREDUMP) || !dev->driver->coredump ||
	    !device_create_file(dev, &dev_attr_coredump))
		return 0;

	sysfs_remove_link(&dev->kobj, "driver");

rm_dev:
	sysfs_remove_link(&dev->driver->p->kobj,
388
			  kobject_name(&dev->kobj));
389 390

fail:
A
Andrew Morton 已提交
391
	return ret;
392 393
}

394 395 396 397 398
static void driver_sysfs_remove(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	if (drv) {
399 400
		if (drv->coredump)
			device_remove_file(dev, &dev_attr_coredump);
401
		sysfs_remove_link(&drv->p->kobj, kobject_name(&dev->kobj));
402 403 404 405 406
		sysfs_remove_link(&dev->kobj, "driver");
	}
}

/**
 * device_bind_driver - bind a driver to one device.
 * @dev: device.
 *
 * Allow manual attachment of a driver to a device.
 * Caller must have already set @dev->driver.
 *
 * Note that this does not modify the bus reference count
 * nor take the bus's rwsem. Please verify those are accounted
 * for before calling this. (It is ok to call with no other effort
 * from a driver's probe() method.)
 *
 * This function must be called with the device lock held.
 */
int device_bind_driver(struct device *dev)
{
	int ret;

	ret = driver_sysfs_add(dev);
	if (!ret)
		driver_bound(dev);
	else if (dev->bus)
		/* tell bus listeners the manual binding attempt failed */
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
	return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
/* Number of probes in flight; wait_for_device_probe() sleeps until zero */
static atomic_t probe_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);

/*
 * Queue @dev for deferred probing, and re-trigger deferred processing if a
 * trigger fired while this probe was running (otherwise @dev could be left
 * stranded on the pending list).
 */
static void driver_deferred_probe_add_trigger(struct device *dev,
					      int local_trigger_count)
{
	driver_deferred_probe_add(dev);
	/* Did a trigger occur while probing? Need to re-trigger if yes */
	if (local_trigger_count != atomic_read(&deferred_trigger_count))
		driver_deferred_probe_trigger();
}

/*
 * really_probe - perform the actual probe of @drv against @dev.
 *
 * Returns 1 when the device was bound, 0 when the probe failed (errors
 * from ->probe are swallowed so the next driver can try its luck), or a
 * negative errno for forced deferral / supplier problems.
 */
static int really_probe(struct device *dev, struct device_driver *drv)
{
	int ret = -EPROBE_DEFER;
	int local_trigger_count = atomic_read(&deferred_trigger_count);
	/* optionally probe twice, removing in between, to exercise ->remove */
	bool test_remove = IS_ENABLED(CONFIG_DEBUG_TEST_DRIVER_REMOVE) &&
			   !drv->suppress_bind_attrs;

	if (defer_all_probes) {
		/*
		 * Value of defer_all_probes can be set only by
		 * device_defer_all_probes_enable() which, in turn, will call
		 * wait_for_device_probe() right after that to avoid any races.
		 */
		dev_dbg(dev, "Driver %s force probe deferral\n", drv->name);
		driver_deferred_probe_add(dev);
		return ret;
	}

	ret = device_links_check_suppliers(dev);
	if (ret == -EPROBE_DEFER)
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
	if (ret)
		return ret;

	atomic_inc(&probe_count);
	pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
		 drv->bus->name, __func__, drv->name, dev_name(dev));
	WARN_ON(!list_empty(&dev->devres_head));

re_probe:
	dev->driver = drv;

	/* If using pinctrl, bind pins now before probing */
	ret = pinctrl_bind_pins(dev);
	if (ret)
		goto pinctrl_bind_failed;

	ret = dma_configure(dev);
	if (ret)
		goto dma_failed;

	if (driver_sysfs_add(dev)) {
		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
			__func__, dev_name(dev));
		goto probe_failed;
	}

	if (dev->pm_domain && dev->pm_domain->activate) {
		ret = dev->pm_domain->activate(dev);
		if (ret)
			goto probe_failed;
	}

	/* the bus's probe hook, when present, takes precedence */
	if (dev->bus->probe) {
		ret = dev->bus->probe(dev);
		if (ret)
			goto probe_failed;
	} else if (drv->probe) {
		ret = drv->probe(dev);
		if (ret)
			goto probe_failed;
	}

	if (test_remove) {
		test_remove = false;

		if (dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		devres_release_all(dev);
		driver_sysfs_remove(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);

		goto re_probe;
	}

	pinctrl_init_done(dev);

	if (dev->pm_domain && dev->pm_domain->sync)
		dev->pm_domain->sync(dev);

	driver_bound(dev);
	ret = 1;
	pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);
	goto done;

probe_failed:
	dma_deconfigure(dev);
dma_failed:
	if (dev->bus)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
	/* unwind everything done since re_probe, in reverse order */
	device_links_no_driver(dev);
	devres_release_all(dev);
	driver_sysfs_remove(dev);
	dev->driver = NULL;
	dev_set_drvdata(dev, NULL);
	if (dev->pm_domain && dev->pm_domain->dismiss)
		dev->pm_domain->dismiss(dev);
	pm_runtime_reinit(dev);
	dev_pm_set_driver_flags(dev, 0);

	switch (ret) {
	case -EPROBE_DEFER:
		/* Driver requested deferred probing */
		dev_dbg(dev, "Driver %s requests probe deferral\n", drv->name);
		driver_deferred_probe_add_trigger(dev, local_trigger_count);
		break;
	case -ENODEV:
	case -ENXIO:
		pr_debug("%s: probe of %s rejects match %d\n",
			 drv->name, dev_name(dev), ret);
		break;
	default:
		/* driver matched but the probe failed */
		printk(KERN_WARNING
		       "%s: probe of %s failed with error %d\n",
		       drv->name, dev_name(dev), ret);
	}
	/*
	 * Ignore errors returned by ->probe so that the next driver can try
	 * its luck.
	 */
	ret = 0;
done:
	atomic_dec(&probe_count);
	wake_up(&probe_waitqueue);
	return ret;
}

584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600
/*
 * For initcall_debug, show the driver probe time.
 */
static int really_probe_debug(struct device *dev, struct device_driver *drv)
{
	ktime_t calltime, delta, rettime;
	int ret;

	calltime = ktime_get();
	ret = really_probe(dev, drv);
	rettime = ktime_get();
	delta = ktime_sub(rettime, calltime);
	printk(KERN_DEBUG "probe of %s returned %d after %lld usecs\n",
	       dev_name(dev), ret, (s64) ktime_to_us(delta));
	return ret;
}

/**
 * driver_probe_done
 * Determine if the probe sequence is finished or not.
 *
 * Should somehow figure out how to use a semaphore, not an atomic variable...
 */
int driver_probe_done(void)
{
	pr_debug("%s: probe_count = %d\n", __func__,
		 atomic_read(&probe_count));
	/* -EBUSY while any probe is still in flight */
	if (atomic_read(&probe_count))
		return -EBUSY;
	return 0;
}

/**
 * wait_for_device_probe
 * Wait for device probing to be completed.
 */
void wait_for_device_probe(void)
{
	/* wait for the deferred probe workqueue to finish */
	flush_work(&deferred_probe_work);

	/* wait for the known devices to complete their probing */
	wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
	/* and for any asynchronously scheduled probes to drain */
	async_synchronize_full();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);

/**
 * driver_probe_device - attempt to bind device & driver together
 * @drv: driver to bind a device to
 * @dev: device to try to bind to the driver
 *
 * This function returns -ENODEV if the device is not registered,
 * 1 if the device is bound successfully and 0 otherwise.
 *
 * This function must be called with @dev lock held.  When called for a
 * USB interface, @dev->parent lock must be held as well.
 *
 * If the device has a parent, runtime-resume the parent before driver probing.
 */
int driver_probe_device(struct device_driver *drv, struct device *dev)
{
	int ret = 0;

	if (!device_is_registered(dev))
		return -ENODEV;

	pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
		 drv->bus->name, __func__, dev_name(dev), drv->name);

	/* keep suppliers and the parent runtime-active for the probe duration */
	pm_runtime_get_suppliers(dev);
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	pm_runtime_barrier(dev);
	if (initcall_debug)
		ret = really_probe_debug(dev, drv);
	else
		ret = really_probe(dev, drv);
	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	pm_runtime_put_suppliers(dev);
	return ret;
}

672
bool driver_allows_async_probing(struct device_driver *drv)
673
{
674 675
	switch (drv->probe_type) {
	case PROBE_PREFER_ASYNCHRONOUS:
676 677
		return true;

678 679 680 681
	case PROBE_FORCE_SYNCHRONOUS:
		return false;

	default:
682
		if (module_requested_async_probing(drv->owner))
683
			return true;
684

685 686
		return false;
	}
687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726
}

struct device_attach_data {
	struct device *dev;

	/*
	 * Indicates whether we are considering asynchronous probing or
	 * not. Only initial binding after device or driver registration
	 * (including deferral processing) may be done asynchronously, the
	 * rest is always synchronous, as we expect it is being done by
	 * request from userspace.
	 */
	bool check_async;

	/*
	 * Indicates if we are binding synchronous or asynchronous drivers.
	 * When asynchronous probing is enabled we'll execute 2 passes
	 * over drivers: first pass doing synchronous probing and second
	 * doing asynchronous probing (if synchronous did not succeed -
	 * most likely because there was no driver requiring synchronous
	 * probing - and we found asynchronous driver during first pass).
	 * The 2 passes are done because we can't shoot asynchronous
	 * probe for given device and driver from bus_for_each_drv() since
	 * driver pointer is not guaranteed to stay valid once
	 * bus_for_each_drv() iterates to the next driver on the bus.
	 */
	bool want_async;

	/*
	 * We'll set have_async to 'true' if, while scanning for matching
	 * driver, we'll encounter one that requests asynchronous probing.
	 */
	bool have_async;
};

/* bus_for_each_drv() callback: try matching and probing @drv against @dev. */
static int __device_attach_driver(struct device_driver *drv, void *_data)
{
	struct device_attach_data *data = _data;
	struct device *dev = data->dev;
	bool async_allowed;
	int ret;

	/*
	 * Check if device has already been claimed. This may
	 * happen with driver loading, device discovery/registration,
	 * and deferred probe processing happens all at once with
	 * multiple threads.
	 */
	if (dev->driver)
		return -EBUSY;

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d", ret);
		return ret;
	} /* ret > 0 means positive match */

	async_allowed = driver_allows_async_probing(drv);

	if (async_allowed)
		data->have_async = true;

	/* skip drivers that belong to the other (sync vs. async) pass */
	if (data->check_async && async_allowed != data->want_async)
		return 0;

	return driver_probe_device(drv, dev);
}

/* Asynchronous leg of __device_attach(): runs the driver scan off-thread. */
static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
{
	struct device *dev = _dev;
	struct device_attach_data data = {
		.dev		= dev,
		.check_async	= true,
		.want_async	= true,
	};

	device_lock(dev);

	/* keep the parent runtime-resumed while probing, as in the sync path */
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);

	bus_for_each_drv(dev->bus, NULL, &data, __device_attach_driver);
	dev_dbg(dev, "async probe completed\n");

	pm_request_idle(dev);

	if (dev->parent)
		pm_runtime_put(dev->parent);

	device_unlock(dev);

	/* drop the reference taken when this async probe was scheduled */
	put_device(dev);
}

/*
 * Try to bind @dev to a matching driver.  Returns 1 if bound, 0 if no
 * driver matched (a second, asynchronous pass may still have been
 * scheduled when @allow_async is set), or a negative errno on error.
 */
static int __device_attach(struct device *dev, bool allow_async)
{
	int ret = 0;

	device_lock(dev);
	if (dev->driver) {
		if (device_is_bound(dev)) {
			ret = 1;
			goto out_unlock;
		}
		/* a driver was pre-set (manual bind): finish the binding */
		ret = device_bind_driver(dev);
		if (ret == 0)
			ret = 1;
		else {
			dev->driver = NULL;
			ret = 0;
		}
	} else {
		struct device_attach_data data = {
			.dev = dev,
			.check_async = allow_async,
			.want_async = false,
		};

		if (dev->parent)
			pm_runtime_get_sync(dev->parent);

		ret = bus_for_each_drv(dev->bus, NULL, &data,
					__device_attach_driver);
		if (!ret && allow_async && data.have_async) {
			/*
			 * If we could not find appropriate driver
			 * synchronously and we are allowed to do
			 * async probes and there are drivers that
			 * want to probe asynchronously, we'll
			 * try them.
			 */
			dev_dbg(dev, "scheduling asynchronous probe\n");
			get_device(dev);
			async_schedule(__device_attach_async_helper, dev);
		} else {
			pm_request_idle(dev);
		}

		if (dev->parent)
			pm_runtime_put(dev->parent);
	}
out_unlock:
	device_unlock(dev);
	return ret;
}

/**
 * device_attach - try to attach device to a driver.
 * @dev: device.
 *
 * Walk the list of drivers that the bus has and call
 * driver_probe_device() for each pair. If a compatible
 * pair is found, break out and return.
 *
 * Returns 1 if the device was bound to a driver;
 * 0 if no matching driver was found;
 * -ENODEV if the device is not registered.
 *
 * When called for a USB interface, @dev->parent lock must be held.
 */
int device_attach(struct device *dev)
{
	/* userspace-requested attach: never probe asynchronously */
	return __device_attach(dev, false);
}
EXPORT_SYMBOL_GPL(device_attach);
859

/* First attach after registration: asynchronous probing is permitted. */
void device_initial_probe(struct device *dev)
{
	__device_attach(dev, true);
}

/* bus_for_each_dev() callback: try to bind @data (a driver) to @dev. */
static int __driver_attach(struct device *dev, void *data)
{
	struct device_driver *drv = data;
	int ret;

	/*
	 * Lock device and try to bind to it. We drop the error
	 * here and always return 0, because we need to keep trying
	 * to bind to devices and some drivers will return an error
	 * simply if it didn't support the device.
	 *
	 * driver_probe_device() will spit a warning if there
	 * is an error.
	 */

	ret = driver_match_device(drv, dev);
	if (ret == 0) {
		/* no match */
		return 0;
	} else if (ret == -EPROBE_DEFER) {
		dev_dbg(dev, "Device match requests probe deferral\n");
		driver_deferred_probe_add(dev);
	} else if (ret < 0) {
		dev_dbg(dev, "Bus failed to match device: %d", ret);
		return ret;
	} /* ret > 0 means positive match */

	/* some buses require the parent to be locked across the probe */
	if (dev->parent && dev->bus->need_parent_lock)
		device_lock(dev->parent);
	device_lock(dev);
	if (!dev->driver)
		driver_probe_device(drv, dev);
	device_unlock(dev);
	if (dev->parent && dev->bus->need_parent_lock)
		device_unlock(dev->parent);

	return 0;
}

/**
 * driver_attach - try to bind driver to devices.
 * @drv: driver.
 *
 * Walk the list of devices that the bus has on it and try to
 * match the driver with each one.  If driver_probe_device()
 * returns 0 and the @dev->driver is set, we've found a
 * compatible pair.
 */
int driver_attach(struct device_driver *drv)
{
	return bus_for_each_dev(drv->bus, NULL, drv, __driver_attach);
}
EXPORT_SYMBOL_GPL(driver_attach);
918

/*
 * __device_release_driver() must be called with @dev lock held.
 * When called for a USB interface, @dev->parent lock must be held as well.
 */
static void __device_release_driver(struct device *dev, struct device *parent)
{
	struct device_driver *drv;

	drv = dev->driver;
	if (drv) {
		while (device_links_busy(dev)) {
			/* drop our locks so consumers can be unbound safely */
			device_unlock(dev);
			if (parent && dev->bus->need_parent_lock)
				device_unlock(parent);

			device_links_unbind_consumers(dev);
			if (parent && dev->bus->need_parent_lock)
				device_lock(parent);

			device_lock(dev);
			/*
			 * A concurrent invocation of the same function might
			 * have released the driver successfully while this one
			 * was waiting, so check for that.
			 */
			if (dev->driver != drv)
				return;
		}

		/* keep the device runtime-active across the teardown */
		pm_runtime_get_sync(dev);
		pm_runtime_clean_up_links(dev);

		driver_sysfs_remove(dev);

		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBIND_DRIVER,
						     dev);

		pm_runtime_put_sync(dev);

		/* the bus's remove hook, when present, takes precedence */
		if (dev->bus && dev->bus->remove)
			dev->bus->remove(dev);
		else if (drv->remove)
			drv->remove(dev);

		device_links_driver_cleanup(dev);

		devres_release_all(dev);
		dma_deconfigure(dev);
		dev->driver = NULL;
		dev_set_drvdata(dev, NULL);
		if (dev->pm_domain && dev->pm_domain->dismiss)
			dev->pm_domain->dismiss(dev);
		pm_runtime_reinit(dev);
		dev_pm_set_driver_flags(dev, 0);

		klist_remove(&dev->p->knode_driver);
		device_pm_check_callbacks(dev);
		if (dev->bus)
			blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
						     BUS_NOTIFY_UNBOUND_DRIVER,
						     dev);

		kobject_uevent(&dev->kobj, KOBJ_UNBIND);
	}
}

/*
 * Release @dev from its driver, but only when @drv is NULL or matches the
 * driver currently bound.  Takes the parent lock first when the bus
 * requires it.
 */
void device_release_driver_internal(struct device *dev,
				    struct device_driver *drv,
				    struct device *parent)
{
	if (parent && dev->bus->need_parent_lock)
		device_lock(parent);

	device_lock(dev);
	if (!drv || drv == dev->driver)
		__device_release_driver(dev, parent);

	device_unlock(dev);
	if (parent && dev->bus->need_parent_lock)
		device_unlock(parent);
}

/**
 * device_release_driver - manually detach device from driver.
 * @dev: device.
 *
 * Manually detach device from driver.
 * When called for a USB interface, @dev->parent lock must be held.
 *
 * If this function is to be called with @dev->parent lock held, ensure that
 * the device's consumers are unbound in advance or that their locks can be
 * acquired under the @dev->parent lock.
 */
void device_release_driver(struct device *dev)
{
	/*
	 * If anyone calls device_release_driver() recursively from
	 * within their ->remove callback for the same device, they
	 * will deadlock right here.
	 */
	device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
1024

/**
 * driver_detach - detach driver from all devices it controls.
 * @drv: driver.
 */
void driver_detach(struct device_driver *drv)
{
	struct device_private *dev_prv;
	struct device *dev;

	/* make sure no async probes of this driver are still in flight */
	if (driver_allows_async_probing(drv))
		async_synchronize_full();

	for (;;) {
		spin_lock(&drv->p->klist_devices.k_lock);
		if (list_empty(&drv->p->klist_devices.k_list)) {
			spin_unlock(&drv->p->klist_devices.k_lock);
			break;
		}
		/* take the last device on the driver's klist */
		dev_prv = list_entry(drv->p->klist_devices.k_list.prev,
				     struct device_private,
				     knode_driver.n_node);
		dev = dev_prv->device;
		/* hold a reference so the device can't vanish once unlocked */
		get_device(dev);
		spin_unlock(&drv->p->klist_devices.k_lock);
		device_release_driver_internal(dev, drv, dev->parent);
		put_device(dev);
	}
}