/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}

static struct device_attribute spi_dev_attrs[] = {
	__ATTR_RO(modalias),
	__ATTR_NULL,
};

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		NULL
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_attrs	= spi_dev_attrs,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	return sdrv->probe(to_spi_device(dev));
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	return sdrv->remove(to_spi_device(dev));
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
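
/*
 * Example (illustrative sketch, not part of this file): a minimal driver
 * registered with spi_register_driver().  The "example" name and the
 * example_probe()/example_remove() helpers are hypothetical.
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		// per-device init; the core has already run spi_setup()
 *		return 0;
 *	}
 *
 *	static int example_remove(struct spi_device *spi)
 *	{
 *		// undo whatever example_probe() did
 *		return 0;
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver = {
 *			.name	= "example",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *	};
 *
 * The driver would then call spi_register_driver(&example_driver) from
 * its module init path (or simply use module_spi_driver()).
 */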

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into a board file like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info and
 * spi_master lists, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;
	struct device		*dev = master->dev.parent;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof *spi, GFP_KERNEL);
	if (!spi) {
		dev_err(dev, "cannot alloc spi_device\n");
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	struct device *d;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
			spi->chip_select);


	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
	if (d != NULL) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		put_device(d);
		status = -EBUSY;
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
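
/*
 * Example (hypothetical sketch): how an adapter driver might pair
 * spi_alloc_device() with spi_add_device().  The modalias, chip select
 * and speed below are made up for illustration.
 *
 *	struct spi_device *proxy = spi_alloc_device(master);
 *
 *	if (!proxy)
 *		return -ENOMEM;
 *	strlcpy(proxy->modalias, "example-chip", sizeof(proxy->modalias));
 *	proxy->chip_select = 0;
 *	proxy->max_speed_hz = 1000000;
 *	status = spi_add_device(proxy);
 *	if (status < 0)
 *		spi_dev_put(proxy);	// discard without registering
 */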

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
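
/*
 * Example (illustrative, with made-up chip names and numbers): a board
 * file might declare its hard-wired SPI devices and register them from
 * early init code like this:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "example-flash",
 *			.max_speed_hz	= 25000000,
 *			.bus_num	= 0,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */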

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool cur_cs = true;
	bool keep_cs = false;
	int ret = 0;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		INIT_COMPLETION(master->xfer_completion);

		ret = master->transfer_one(master, msg->spi, xfer);
		if (ret < 0) {
			dev_err(&msg->spi->dev,
				"SPI transfer failed: %d\n", ret);
			goto out;
		}

		if (ret > 0)
			wait_for_completion(&master->xfer_completion);

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				cur_cs = !cur_cs;
				spi_set_cs(msg->spi, cur_cs);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting the completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
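
/*
 * Example (hypothetical controller driver): a transfer_one() that starts
 * an interrupt driven transfer returns a positive value, and the IRQ
 * handler later reports completion with spi_finalize_current_transfer().
 * The example_* names and the example_start_dma() helper are placeholders.
 *
 *	static int example_transfer_one(struct spi_master *master,
 *					struct spi_device *spi,
 *					struct spi_transfer *xfer)
 *	{
 *		example_start_dma(master, xfer);	// raises an IRQ when done
 *		return 1;				// transfer still in flight
 *	}
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */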

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
	    list_entry(master->queue.next, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return -ENOMEM;
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue))
		next = NULL;
	else
		next = list_entry(master->queue.next,
				  struct spi_message, queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}
	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);

	trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		msleep(10);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be queued to the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->queued = true;
	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
err_init_queue:
	spi_destroy_queue(master);
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	const __be32 *prop;
	char modalias[SPI_NAME_SIZE + 4];
	int rc;
	int len;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		prop = of_get_property(nc, "reg", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'reg' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = be32_to_cpup(prop);

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;
		if (of_find_property(nc, "spi-3wire", NULL))
			spi->mode |= SPI_3WIRE;

		/* Device DUAL/QUAD mode */
		prop = of_get_property(nc, "spi-tx-bus-width", &len);
		if (prop && len == sizeof(*prop)) {
			switch (be32_to_cpup(prop)) {
			case SPI_NBITS_SINGLE:
				break;
			case SPI_NBITS_DUAL:
				spi->mode |= SPI_TX_DUAL;
				break;
			case SPI_NBITS_QUAD:
				spi->mode |= SPI_TX_QUAD;
				break;
			default:
				dev_err(&master->dev,
					"spi-tx-bus-width %d not supported\n",
					be32_to_cpup(prop));
				spi_dev_put(spi);
				continue;
			}
		}

		prop = of_get_property(nc, "spi-rx-bus-width", &len);
		if (prop && len == sizeof(*prop)) {
			switch (be32_to_cpup(prop)) {
			case SPI_NBITS_SINGLE:
				break;
			case SPI_NBITS_DUAL:
				spi->mode |= SPI_RX_DUAL;
				break;
			case SPI_NBITS_QUAD:
				spi->mode |= SPI_RX_QUAD;
				break;
			default:
				dev_err(&master->dev,
					"spi-rx-bus-width %d not supported\n",
					be32_to_cpup(prop));
				spi_dev_put(spi);
				continue;
			}
		}

		/* Device speed */
		prop = of_get_property(nc, "spi-max-frequency", &len);
		if (!prop || len < sizeof(*prop)) {
			dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = be32_to_cpup(prop);

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		snprintf(modalias, sizeof(modalias), "%s%s", SPI_MODULE_PREFIX,
			 spi->modalias);
		request_module(modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}

	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_HANDLE_SET(&spi->dev, handle);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	strlcpy(spi->modalias, dev_name(&adev->dev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof *master, GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max(nb, (int)master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
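
/*
 * Example (sketch with hypothetical example_* names): the probe() of a
 * platform based controller driver typically allocates, fills in and
 * registers its spi_master roughly like this:
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(struct example_hw));
 *	if (!master)
 *		return -ENOMEM;
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->transfer_one = example_transfer_one;
 *	master->dev.of_node = pdev->dev.of_node;
 *	status = spi_register_master(master);
 *	if (status)
 *		spi_master_put(master);	// drop the reference on failure
 */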

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
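
/*
 * Example (sketch): code registering a late-discovered device might look
 * up its controller by bus number; bus number 1 here is arbitrary.
 *
 *	struct spi_master *master = spi_busnum_to_master(1);
 *
 *	if (master) {
 *		// ... e.g. hand it to spi_new_device() ...
 *		spi_master_put(master);	// release the reference taken above
 *	}
 */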


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits;
	int		status = 0;

	/* Check the mode to prevent DUAL and QUAD from being set
	 * at the same time.
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: cannot select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* In SPI_3WIRE mode, DUAL and QUAD are forbidden. */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s"
				"%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
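
/*
 * Example (illustrative): a protocol driver reconfiguring a hypothetical
 * device that needs mode 3, 16-bit words and a 2 MHz clock:
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */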

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;

	message->spi = spi;

	trace_spi_message_submit(message);

	if (list_empty(&message->transfers))
		return -EINVAL;
	if (!message->complete)
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set the transfer bits_per_word and max speed to the spi device
	 * defaults if they are not set for this transfer.
	 * Set the transfer tx_nbits and rx_nbits to the single transfer
	 * default (SPI_NBITS_SINGLE) if they are not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;
		if (!xfer->speed_hz) {
			xfer->speed_hz = spi->max_speed_hz;
			if (master->max_speed_hz &&
			    xfer->speed_hz > master->max_speed_hz)
				xfer->speed_hz = master->max_speed_hz;
		}

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;
		if (xfer->speed_hz && master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* Check transfer tx/rx_nbits:
		 * 1. the value must be single, dual or quad
		 * 2. tx/rx_nbits must be allowed by the spi_device's mode
		 * 3. in SPI_3WIRE mode, tx/rx_nbits must be single
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
			if ((spi->mode & SPI_3WIRE) &&
				(xfer->tx_nbits != SPI_NBITS_SINGLE))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
			if ((spi->mode & SPI_3WIRE) &&
				(xfer->rx_nbits != SPI_NBITS_SINGLE))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;
	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
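
/*
 * Example (sketch; my_done() and struct my_ctx are placeholders): issuing
 * a message asynchronously and getting notified from the completion
 * callback, which runs in a context that can't sleep:
 *
 *	static void my_done(void *context)
 *	{
 *		struct my_ctx *ctx = context;
 *
 *		complete(&ctx->done);	// or schedule follow-up work
 *	}
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	msg.complete = my_done;
 *	msg.context = ctx;
 *	status = spi_async(spi, &msg);
 */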

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;

}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
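
/*
 * Illustrative sketch, not part of the core: a typical spi_sync() caller
 * builds the message on the stack but keeps the data in a dma-safe
 * (kmalloc'd) buffer, since low-overhead controllers may DMA directly
 * from/to it.  The command byte and length below are hypothetical.
 */
static int __maybe_unused example_full_duplex(struct spi_device *spi)
{
	struct spi_transfer t = { };
	struct spi_message m;
	u8 *bounce;
	int status;

	bounce = kmalloc(4, GFP_KERNEL | GFP_DMA);
	if (!bounce)
		return -ENOMEM;
	bounce[0] = 0x80;	/* hypothetical command byte */

	t.tx_buf = bounce;
	t.rx_buf = bounce;	/* full duplex: reply overwrites the command */
	t.len = 4;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	status = spi_sync(spi, &m);	/* blocks until the transfer is done */

	kfree(bounce);
	return status;
}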

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
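
/*
 * Illustrative sketch, not part of the core: a driver that must run
 * several messages back to back with no other device interleaving on
 * the same bus brackets them with spi_bus_lock()/spi_bus_unlock() and
 * uses the _locked transfer calls in between.  The two messages are
 * placeholders supplied by the (hypothetical) caller.
 */
static int __maybe_unused example_exclusive_sequence(struct spi_device *spi,
						     struct spi_message *first,
						     struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int status;

	/* from here on, other callers' spi_async() fails with -EBUSY,
	 * and plain spi_sync() blocks on the bus mutex
	 */
	spi_bus_lock(master);

	status = spi_sync_locked(spi, first);
	if (status == 0)
		status = spi_sync_locked(spi, second);

	spi_bus_unlock(master);		/* reopen the bus to everyone */

	return status;
}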

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

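/* dma-safe bounce buffer shared by spi_write_then_read(), guarded by a local mutex */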
static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use the preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here (it's purely a convenience), but we can keep heap
	 * costs out of the hot path unless someone else is using the
	 * preallocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
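
/*
 * Illustrative sketch, not part of the core: reading one register of a
 * hypothetical chip whose protocol is "write a one-byte address, then
 * read back a one-byte value".  Stack variables are fine here because
 * spi_write_then_read() copies through its own dma-safe buffer.
 */
static int __maybe_unused example_read_reg(struct spi_device *spi,
					   u8 reg, u8 *val)
{
	/* half duplex: one byte out, then one byte in */
	return spi_write_then_read(spi, &reg, 1, val, 1);
}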

/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);