/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
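
/* Usage sketch (illustrative only, not part of this file): a protocol
 * driver that supports several chip variants can call spi_get_device_id()
 * from probe() to recover the driver_data of the matching id_table entry.
 * The foo_* names and sizes below are hypothetical:
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo128", 128 },
 *		{ "foo256", 256 },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct spi_device_id *id = spi_get_device_id(spi);
 *		unsigned long size = id->driver_data;
 *		...
 *	}
 */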

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		NULL
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	acpi_dev_pm_attach(&spi->dev, true);
	ret = sdrv->probe(spi);
	if (ret)
		acpi_dev_pm_detach(&spi->dev, true);

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = sdrv->remove(spi);
	acpi_dev_pm_detach(&spi->dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
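
/* Usage sketch (illustrative only): a typical SPI protocol driver fills in
 * a struct spi_driver and registers it from its module init code;
 * module_spi_driver() hides the init/exit boilerplate.  The foo_* names
 * are hypothetical:
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 */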

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c with other readonly (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;
	struct device		*dev = master->dev.parent;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		dev_err(dev, "cannot alloc spi_device\n");
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
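
/* Usage sketch (illustrative only): code that learns about a device
 * out-of-band (e.g. an add-on board or bus-glue driver) can pair
 * spi_alloc_device() with spi_add_device().  All values and the "foo"
 * modalias are made up:
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi->chip_select = 1;
 *	spi->max_speed_hz = 10000000;
 *	spi->mode = SPI_MODE_3;
 *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */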

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
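
/* Usage sketch (illustrative only): board init code usually declares its
 * hard-wired SPI devices in an __initdata table and registers it during
 * an early initcall.  The values below are invented:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo-codec",
 *			.max_speed_hz	= 2000000,
 *			.bus_num	= 0,
 *			.chip_select	= 2,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */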

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *dev = master->dev.parent;
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	void *tmp;
	size_t max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	if (msg->is_dma_mapped || !master->can_dma)
		return 0;

	tx_dev = &master->dma_tx->dev->device;
	rx_dev = &master->dma_rx->dev->device;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			xfer->tx_dma = dma_map_single(tx_dev,
						      (void *)xfer->tx_buf,
						      xfer->len,
						      DMA_TO_DEVICE);
			if (dma_mapping_error(dev, xfer->tx_dma)) {
				dev_err(dev, "dma_map_single Tx failed\n");
				return -ENOMEM;
			}
		}

		if (xfer->rx_buf != NULL) {
			xfer->rx_dma = dma_map_single(rx_dev,
						      xfer->rx_buf, xfer->len,
						      DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, xfer->rx_dma)) {
				dev_err(dev, "dma_map_single Rx failed\n");
				dma_unmap_single(tx_dev, xfer->tx_dma,
						 xfer->len, DMA_TO_DEVICE);
				return -ENOMEM;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
		return 0;

	tx_dev = &master->dma_tx->dev->device;
	rx_dev = &master->dma_rx->dev->device;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->rx_buf)
			dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
					 DMA_FROM_DEVICE);
		if (xfer->tx_buf)
			dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
					 DMA_TO_DEVICE);
	}

	return 0;
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool cur_cs = true;
	bool keep_cs = false;
	int ret = 0;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		reinit_completion(&master->xfer_completion);

		ret = master->transfer_one(master, msg->spi, xfer);
		if (ret < 0) {
			dev_err(&msg->spi->dev,
				"SPI transfer failed: %d\n", ret);
			goto out;
		}

		if (ret > 0) {
			ret = 0;
			wait_for_completion(&master->xfer_completion);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				cur_cs = !cur_cs;
				spi_set_cs(msg->spi, cur_cs);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
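
/* Usage sketch (illustrative only): a controller driver on the
 * transfer_one() path may return a positive value to signal that the
 * transfer completes asynchronously, then call
 * spi_finalize_current_transfer() once the hardware is done, typically
 * from its interrupt handler.  The foo_* names are hypothetical:
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		foo_clear_irq(master);
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */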

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");
854 855 856

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
857 858 859 860
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
885 886 887
			"failed to transfer one message from queue: %d\n", ret);
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903
		return;
	}
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return -ENOMEM;
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}
	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);

	trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		msleep(10);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->queued = true;
	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
err_init_queue:
	spi_destroy_queue(master);
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	int rc;
	u32 value;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		rc = of_property_read_u32(nc, "reg", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = value;

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;
		if (of_find_property(nc, "spi-3wire", NULL))
			spi->mode |= SPI_3WIRE;

		/* Device DUAL/QUAD mode */
		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_TX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_TX_QUAD;
				break;
			default:
				dev_err(&master->dev,
					"spi-tx-bus-width %d not supported\n",
					value);
				spi_dev_put(spi);
				continue;
			}
		}

		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_RX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_RX_QUAD;
				break;
			default:
				dev_err(&master->dev,
					"spi-rx-bus-width %d not supported\n",
					value);
				spi_dev_put(spi);
				continue;
			}
		}

		/* Device speed */
		rc = of_property_read_u32(nc, "spi-max-frequency", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = value;

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}

	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
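
/* Usage sketch (illustrative only): a controller driver's probe() usually
 * pairs spi_alloc_master() with spi_register_master(), dropping the
 * reference with spi_master_put() on failure.  Everything named foo_* and
 * all values are hypothetical:
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!master)
 *		return -ENOMEM;
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	master->transfer_one_message = foo_transfer_one_message;
 *
 *	ret = spi_register_master(master);
 *	if (ret)
 *		spi_master_put(master);
 *	return ret;
 */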

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register an SPI master as with spi_register_master() which will
 * automatically be unregistered.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);
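
/* Usage sketch (illustrative only): controller drivers normally call
 * spi_master_suspend()/spi_master_resume() from their own system sleep
 * callbacks so the message queue is quiesced around the hardware state
 * changes.  The foo_* names are hypothetical:
 *
 *	static int foo_spi_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = spi_master_suspend(master);
 *		if (ret)
 *			return ret;
 *		foo_disable_clocks(master);
 *		return 0;
 *	}
 */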

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits;
	int		status = 0;

	/* Check the mode to prevent DUAL and QUAD from being set at the
	 * same time.
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* In SPI_3WIRE mode, DUAL and QUAD are not allowed.
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
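
/* Usage sketch (illustrative only): a protocol driver that needs
 * non-default settings updates the spi_device fields and then calls
 * spi_setup(), usually from probe().  The values shown are invented:
 *
 *	spi->mode = SPI_MODE_0;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret < 0)
 *		return ret;
 */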

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;

	if (list_empty(&message->transfers))
		return -EINVAL;
	if (!message->complete)
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;
		if (!xfer->speed_hz) {
			xfer->speed_hz = spi->max_speed_hz;
			if (master->max_speed_hz &&
			    xfer->speed_hz > master->max_speed_hz)
				xfer->speed_hz = master->max_speed_hz;
		}

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;
		if (xfer->speed_hz && master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
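
/*
 * Illustrative note: for a device whose mode includes SPI_TX_QUAD, a
 * transfer that passes the nbits checks above might (for example) be
 * declared as:
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf		= cmd_buf,
 *		.len		= sizeof(cmd_buf),
 *		.tx_nbits	= SPI_NBITS_QUAD,
 *	};
 *
 * cmd_buf is a placeholder.  Leaving tx_nbits/rx_nbits at zero is also
 * valid; they default to SPI_NBITS_SINGLE here, just as bits_per_word
 * and speed_hz fall back to the spi_device defaults.
 */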

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
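
/*
 * Usage sketch (illustrative only): a hypothetical driver submitting a
 * message from a context that can't sleep, and finishing the work in
 * its completion callback, might look roughly like this:
 *
 *	static void example_complete(void *context)
 *	{
 *		struct example_dev *edev = context;
 *
 *		complete(&edev->done);
 *	}
 *
 *	static int example_kick(struct example_dev *edev)
 *	{
 *		spi_message_init(&edev->msg);
 *		spi_message_add_tail(&edev->xfer, &edev->msg);
 *		edev->msg.complete = example_complete;
 *		edev->msg.context = edev;
 *		return spi_async(edev->spi, &edev->msg);
 *	}
 *
 * struct example_dev and the example_* names are placeholders; the
 * callback runs in a context that can't sleep, and the message and
 * transfer must stay allocated until it has been called.
 */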

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
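
/*
 * Usage sketch (illustrative only): a hypothetical command/response
 * exchange built from two transfers in one message might be written as:
 *
 *	struct spi_transfer xfers[2] = {
 *		{ .tx_buf = cmd,  .len = cmd_len,  },
 *		{ .rx_buf = resp, .len = resp_len, },
 *	};
 *	struct spi_message msg;
 *	int status;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfers[0], &msg);
 *	spi_message_add_tail(&xfers[1], &msg);
 *	status = spi_sync(spi, &msg);
 *
 * cmd, resp and their lengths are placeholders and must be DMA-safe if
 * the controller driver maps them; both transfers run under one chip
 * select assertion unless cs_change is set.
 */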

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
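
/*
 * Usage sketch (illustrative only): a hypothetical driver that must keep
 * other clients off the bus for a multi-message sequence could wrap it as:
 *
 *	spi_bus_lock(spi->master);
 *	status = spi_sync_locked(spi, &first_msg);
 *	if (status == 0)
 *		status = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->master);
 *
 * first_msg and second_msg are placeholders; only the *_locked transfer
 * calls may be used while the bus lock is held.
 */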

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here, (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
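
/*
 * Usage sketch (illustrative only): a hypothetical register-read helper
 * built on this routine; small stack buffers are fine here because the
 * data is copied through the bounce buffer above anyway:
 *
 *	static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		return spi_write_then_read(spi, &reg, 1, val, 1);
 *	}
 */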

/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);