/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		NULL
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(to_spi_device(dev));
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
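
/* Example: a minimal protocol driver registration, as a sketch.  The
 * "mychip" names below are hypothetical and do not exist in this file:
 *
 *	static int mychip_probe(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver mychip_driver = {
 *		.driver = {
 *			.name	= "mychip",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= mychip_probe,
 *	};
 *
 *	spi_register_driver(&mychip_driver);
 */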

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
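
/* Example: the allocate/configure/add pattern these two calls support,
 * as a sketch; "mydev" and the numeric values are hypothetical:
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	strlcpy(spi->modalias, "mydev", sizeof(spi->modalias));
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */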

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
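
/* Example: an adapter driver that learned about a chip out-of-band
 * could instantiate it roughly like this (a sketch; the descriptor
 * values are made up, and a NULL return means the add failed):
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "myadc",
 *		.max_speed_hz	= 500000,
 *		.chip_select	= 1,
 *	};
 *	struct spi_device *dev = spi_new_device(master, &chip);
 */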

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
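
/* Example: board setup code might declare its hard-wired devices like
 * this (a sketch with illustrative values; "ads7846" is just a typical
 * modalias):
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "ads7846",
 *			.max_speed_hz	= 1500000,
 *			.bus_num	= 2,
 *			.chip_select	= 0,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 */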

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		min = min_t(size_t, len, desc_len);

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_set_buf(&sgt->sgl[i], sg_buf, min);

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	int ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
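
/* Example: an interrupt driven controller driver would typically call
 * this from its completion interrupt handler, roughly like so (a
 * sketch; the mydrv names are hypothetical):
 *
 *	static irqreturn_t mydrv_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		mydrv_drain_fifo(master);	(hypothetical helper)
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */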

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return -ENOMEM;
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}
	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);

	trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be queued to the driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	int rc;
	u32 value;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		rc = of_property_read_u32(nc, "reg", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = value;

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;
		if (of_find_property(nc, "spi-3wire", NULL))
			spi->mode |= SPI_3WIRE;
		if (of_find_property(nc, "spi-lsb-first", NULL))
			spi->mode |= SPI_LSB_FIRST;

		/* Device DUAL/QUAD mode */
		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_TX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_TX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-tx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_RX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_RX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-rx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		/* Device speed */
		rc = of_property_read_u32(nc, "spi-max-frequency", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = value;

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}

	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
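
/* Example: typical use from a controller driver's probe(), as a sketch
 * with hypothetical mydrv names; most setup and error handling is
 * omitted:
 *
 *	struct spi_master *master;
 *	struct mydrv_priv *priv;
 *
 *	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!master)
 *		return -ENOMEM;
 *	priv = spi_master_get_devdata(master);
 *	master->bus_num = pdev->id;
 *	master->num_chipselect = 4;
 *	master->mode_bits = SPI_CPOL | SPI_CPHA;
 *	master->transfer_one = mydrv_transfer_one;
 *	status = spi_register_master(master);
 *	if (status)
 *		spi_master_put(master);
 */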

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI master as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound from its driver.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);
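
/* Example: with the managed variant, a probe() ending could look like
 * this sketch; on failure the caller still drops its own reference:
 *
 *	status = devm_spi_register_master(&pdev->dev, master);
 *	if (status)
 *		spi_master_put(master);
 *	return status;
 */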

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
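
/* Example: look up a bus and remember to drop the reference the lookup
 * took (a sketch):
 *
 *	struct spi_master *master = spi_busnum_to_master(1);
 *
 *	if (master) {
 *		use it, e.g. with spi_new_device(), then:
 *		spi_master_put(master);
 *	}
 */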


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
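
/*
 * Illustrative sketch (not from the original sources; my_chip_probe and
 * the chip parameters below are hypothetical): a protocol driver's
 * probe() would typically reconfigure its spi_device before issuing any
 * transfers.
 *
 *	static int my_chip_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *		return spi_setup(spi);
 *	}
 *
 * If the controller can't do 16-bit words or mode 3, spi_setup() fails
 * cleanly here instead of producing garbage on the wire later.
 */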

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}
		/*
		 * SPI transfer length should be multiple of SPI word size
		 * where SPI word size should be power-of-two multiple
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;
	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
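
/*
 * Illustrative sketch (hypothetical driver code; my_dev, my_complete and
 * the md fields are invented for the example): submitting a message from
 * a context that can't sleep, and finishing up in the completion
 * callback, which itself runs in a context that can't sleep.
 *
 *	static void my_complete(void *context)
 *	{
 *		struct my_dev *md = context;
 *
 *		if (md->msg.status)
 *			...	handle the error, e.g. schedule a retry
 *	}
 *
 *	spi_message_init(&md->msg);
 *	spi_message_add_tail(&md->xfer, &md->msg);
 *	md->msg.complete = my_complete;
 *	md->msg.context = md;
 *	ret = spi_async(md->spi, &md->msg);
 *
 * The message and its buffers must stay allocated until my_complete()
 * has run; embedding them in a driver-private struct is the usual way.
 */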

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
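
/*
 * Illustrative sketch (hypothetical chip protocol: one command byte, two
 * response bytes; cmd and resp are assumed to point at DMA-safe storage,
 * e.g. kmalloc'd, rather than at the stack):
 *
 *	struct spi_transfer t[2] = {
 *		{ .tx_buf = cmd,  .len = 1, },
 *		{ .rx_buf = resp, .len = 2, },
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t[0], &m);
 *	spi_message_add_tail(&t[1], &m);
 *	status = spi_sync(spi, &m);
 *
 * Chip select stays active across both transfers, so the command and
 * the read-back form a single bus transaction.
 */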

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
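
/*
 * Illustrative sketch of the locking pattern (hypothetical caller; msg1
 * and msg2 are invented): a driver issuing two messages back-to-back
 * with no other device's traffic allowed in between.
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->master);
 *
 * Plain spi_sync()/spi_async() calls by other devices block (or fail
 * with -EBUSY) until the bus is unlocked.
 */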

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use the preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here (this is purely a convenience API), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}
	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);
	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
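
/*
 * Illustrative sketch (hypothetical register-read convention; the 0x80
 * "read" flag is invented for the example):
 *
 *	u8 cmd = 0x80 | reg;
 *	u8 val;
 *
 *	status = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *
 * Unlike spi_sync(), stack buffers are fine here, since the data is
 * bounced through the preallocated DMA-safe buffer above.
 */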

/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;
	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);