/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(spi_dev);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* suspend will stop irqs and dma; no more i/o */
	if (drv) {
		if (drv->suspend)
			value = drv->suspend(to_spi_device(dev), message);
		else
			dev_dbg(dev, "... can't suspend\n");
	}
	return value;
}

static int spi_legacy_resume(struct device *dev)
{
	int			value = 0;
	struct spi_driver	*drv = to_spi_driver(dev->driver);

	/* resume may restart the i/o queue */
	if (drv) {
		if (drv->resume)
			value = drv->resume(to_spi_device(dev));
		else
			dev_dbg(dev, "... can't resume\n");
	}
	return value;
}

static int spi_pm_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_suspend(dev);
	else
		return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_resume(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_freeze(dev);
	else
		return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_thaw(dev);
	else
		return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_poweroff(dev);
	else
		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pm)
		return pm_generic_restore(dev);
	else
		return spi_legacy_resume(dev);
}
#else
#define spi_pm_suspend	NULL
#define spi_pm_resume	NULL
#define spi_pm_freeze	NULL
#define spi_pm_thaw	NULL
#define spi_pm_poweroff	NULL
#define spi_pm_restore	NULL
#endif

static const struct dev_pm_ops spi_pm = {
	.suspend = spi_pm_suspend,
	.resume = spi_pm_resume,
	.freeze = spi_pm_freeze,
	.thaw = spi_pm_thaw,
	.poweroff = spi_pm_poweroff,
	.restore = spi_pm_restore,
	SET_RUNTIME_PM_OPS(
		pm_generic_runtime_suspend,
		pm_generic_runtime_resume,
		NULL
	)
};

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.pm		= &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	acpi_dev_pm_attach(dev, true);
	ret = sdrv->probe(to_spi_device(dev));
	if (ret)
		acpi_dev_pm_detach(dev, true);

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	acpi_dev_pm_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * spi_register_driver - register a SPI driver
 * @sdrv: the driver to register
 * Context: can sleep
 */
int spi_register_driver(struct spi_driver *sdrv)
{
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(spi_register_driver);
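/*
 * Illustrative sketch (not part of the original file): the usual shape of a
 * protocol driver built around spi_register_driver().  The "example" names
 * and empty probe/remove bodies are hypothetical; only the registration
 * pattern is taken from this file.
 *
 *	static int example_probe(struct spi_device *spi)
 *	{
 *		return 0;	// set the chip up here
 *	}
 *
 *	static int example_remove(struct spi_device *spi)
 *	{
 *		return 0;	// tear the chip down here
 *	}
 *
 *	static struct spi_driver example_driver = {
 *		.driver = {
 *			.name	= "example",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= example_probe,
 *		.remove	= example_remove,
 *	};
 *	module_spi_driver(example_driver);
 */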

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other readonly (flashable) information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Returns a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;
	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Returns 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
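/*
 * Illustrative sketch (not part of the original file) of the two-step
 * alloc/add pattern documented above; the chip parameters shown are made up.
 *
 *	struct spi_device *spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_3;
 *	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));
 *
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// never added, drop the reference
 *		return -ENODEV;
 *	}
 */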

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Returns the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
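/*
 * Illustrative sketch (not part of the original file): how board init code
 * typically feeds spi_register_board_info().  The chip name and the numbers
 * below are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "example-flash",
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.max_speed_hz	= 25000000,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	// called from arch_initcall()-level board setup code:
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */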

/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi->cs_gpio >= 0)
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
	const int sgs = DIV_ROUND_UP(len, desc_len);
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {
		min = min_t(size_t, len, desc_len);

		if (vmalloced_buf) {
			vm_page = vmalloc_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_buf = page_address(vm_page) +
				((size_t)buf & ~PAGE_MASK);
		} else {
			sg_buf = buf;
		}

		sg_set_buf(&sgt->sgl[i], sg_buf, min);

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}

static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	tx_dev = master->dma_tx->device->dev;
	rx_dev = master->dma_rx->device->dev;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	int ms = 1;

	spi_set_cs(msg->spi, true);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = xfer->len * 8 * 1000 / xfer->speed_hz;
				ms += ms + 100; /* some tolerance */

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	spi_finalize_current_message(master);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
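/*
 * Illustrative sketch (not part of the original file): a controller driver
 * that relies on the core transfer_one_message() typically returns a
 * positive value from transfer_one() ("still in progress") and calls
 * spi_finalize_current_transfer() from its completion interrupt.  All
 * example_* names are hypothetical.
 *
 *	static int example_transfer_one(struct spi_master *master,
 *					struct spi_device *spi,
 *					struct spi_transfer *xfer)
 *	{
 *		example_start_dma(master, xfer);	// hypothetical helper
 *		return 1;	// core waits on master->xfer_completion
 *	}
 *
 *	static irqreturn_t example_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */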

/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so call out to the driver to initialize hardware
 * and transfer each message.
 *
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&master->queue_lock, flags);
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}
		master->busy = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);
		return;
	}

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}
	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			return;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		return;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		return;
	}
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);

	master->running = false;
	master->busy = false;

	init_kthread_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return -ENOMEM;
	}
	init_kthread_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread. Without this
	 * setting the message pump thread will remain at default priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}

/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	master->cur_msg = NULL;

	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}
	master->cur_msg_prepared = false;

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);

	trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	queue_kthread_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message. Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * flush_kthread_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	flush_kthread_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to handled is queued to driver queue
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy)
		queue_kthread_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}

/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;
	int rc;
	u32 value;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		/* Alloc an spi_device */
		spi = spi_alloc_device(master);
		if (!spi) {
			dev_err(&master->dev, "spi_device alloc error for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Select device driver */
		if (of_modalias_node(nc, spi->modalias,
				     sizeof(spi->modalias)) < 0) {
			dev_err(&master->dev, "cannot find modalias for %s\n",
				nc->full_name);
			spi_dev_put(spi);
			continue;
		}

		/* Device address */
		rc = of_property_read_u32(nc, "reg", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->chip_select = value;

		/* Mode (clock phase/polarity/etc.) */
		if (of_find_property(nc, "spi-cpha", NULL))
			spi->mode |= SPI_CPHA;
		if (of_find_property(nc, "spi-cpol", NULL))
			spi->mode |= SPI_CPOL;
		if (of_find_property(nc, "spi-cs-high", NULL))
			spi->mode |= SPI_CS_HIGH;
		if (of_find_property(nc, "spi-3wire", NULL))
			spi->mode |= SPI_3WIRE;
		if (of_find_property(nc, "spi-lsb-first", NULL))
			spi->mode |= SPI_LSB_FIRST;

		/* Device DUAL/QUAD mode */
		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_TX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_TX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-tx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
			switch (value) {
			case 1:
				break;
			case 2:
				spi->mode |= SPI_RX_DUAL;
				break;
			case 4:
				spi->mode |= SPI_RX_QUAD;
				break;
			default:
				dev_warn(&master->dev,
					 "spi-rx-bus-width %d not supported\n",
					 value);
				break;
			}
		}

		/* Device speed */
		rc = of_property_read_u32(nc, "spi-max-frequency", &value);
		if (rc) {
			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
				nc->full_name, rc);
			spi_dev_put(spi);
			continue;
		}
		spi->max_speed_hz = value;

		/* IRQ */
		spi->irq = irq_of_parse_and_map(nc, 0);

		/* Store a pointer to the node in the device structure */
		of_node_get(nc);
		spi->dev.of_node = nc;

		/* Register the new device */
		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
		rc = spi_add_device(spi);
		if (rc) {
			dev_err(&master->dev, "spi_device register error %s\n",
				nc->full_name);
			spi_dev_put(spi);
		}

	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif

#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			spi->chip_select = sb->device_selection;
			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct list_head resource_list;
	struct acpi_device *adev;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;
	if (acpi_bus_get_status(adev) || !adev->status.present)
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */

static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
};



/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.  It returns the SPI
 * master structure on success, else NULL.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() and kfree() to prevent a memory
 * leak.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = get_device(dev);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif

/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention:  dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
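/*
 * Illustrative sketch (not part of the original file): the usual shape of a
 * controller probe() built on spi_alloc_master()/spi_register_master().
 * "example_hw" and the numbers chosen are hypothetical.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		struct example_hw *hw;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(*hw));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		hw = spi_master_get_devdata(master);
 *		master->bus_num = pdev->id;
 *		master->num_chipselect = 4;
 *		master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		master->transfer_one = example_transfer_one;
 *		master->dev.of_node = pdev->dev.of_node;
 *
 *		ret = spi_register_master(master);
 *		if (ret)
 *			spi_master_put(master);	// drop the ref on error
 *		return ret;
 *	}
 */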

static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI master controller as with spi_register_master() which
 * will automatically be unregistered when @dev is unbound.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}

/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	int dummy;

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	dummy = device_for_each_child(&master->dev, NULL, __unregister);
	device_unregister(&master->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}

/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
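/*
 * Illustrative sketch (not part of the original file): the lookup/release
 * pairing implied by the refcounting note above.  Bus number 1 is arbitrary.
 *
 *	struct spi_master *master = spi_busnum_to_master(1);
 *
 *	if (master) {
 *		// ... use the master ...
 *		spi_master_put(master);
 *	}
 */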


/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status = 0;

	/* check mode to prevent that DUAL and QUAD set at the same time
	 */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
		"setup: can not select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
	 */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
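/*
 * Illustrative sketch (not part of the original file): a protocol driver's
 * probe() adjusting device settings before its first transfer.  The values
 * are arbitrary examples.
 *
 *	spi->mode = SPI_MODE_0;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2000000;
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		return status;
 */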

static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
1853
	struct spi_transfer *xfer;
1854
	int w_size;
1855

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/*
	 * Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->bits_per_word_mask) {
			/* Only 32 bits fit in the mask */
			if (xfer->bits_per_word > 32)
				return -EINVAL;
			if (!(master->bits_per_word_mask &
					BIT(xfer->bits_per_word - 1)))
				return -EINVAL;
		}

		/*
		 * The SPI transfer length should be a multiple of the SPI
		 * word size, where the word size is rounded up to a
		 * power-of-two number of bytes.
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
				xfer->tx_nbits != SPI_NBITS_DUAL &&
				xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
				xfer->rx_nbits != SPI_NBITS_DUAL &&
				xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
				!(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}

static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
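
/*
 * A minimal sketch of spi_async() usage; mydev_complete() and
 * mydev_read_async() are hypothetical names.  The message, transfer and rx
 * buffer must remain allocated until the completion callback has run, and
 * that callback may not sleep.
 *
 *	static void mydev_complete(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	static int mydev_read_async(struct spi_device *spi,
 *				    struct spi_message *msg,
 *				    struct spi_transfer *xfer,
 *				    void *rx_buf, size_t len,
 *				    struct completion *done)
 *	{
 *		spi_message_init(msg);
 *		memset(xfer, 0, sizeof(*xfer));
 *		xfer->rx_buf = rx_buf;
 *		xfer->len = len;
 *		spi_message_add_tail(xfer, msg);
 *
 *		msg->complete = mydev_complete;
 *		msg->context = done;
 *		return spi_async(spi, msg);
 *	}
 */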

/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;

}
EXPORT_SYMBOL_GPL(spi_async_locked);


/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message,
		      int bus_locked)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;

	message->complete = spi_complete;
	message->context = &done;

	if (!bus_locked)
		mutex_lock(&master->bus_lock_mutex);

	status = spi_async_locked(spi, message);

	if (!bus_locked)
		mutex_unlock(&master->bus_lock_mutex);

	if (status == 0) {
		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}

/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 0);
}
EXPORT_SYMBOL_GPL(spi_sync);
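
/*
 * A minimal sketch of a synchronous command/response exchange built on
 * spi_sync(); the 0x9f opcode, the transfer lengths and mydev_read_id() are
 * hypothetical.  cmd and id should point at DMA-safe (e.g. kmalloc'd)
 * memory rather than on-stack buffers.
 *
 *	static int mydev_read_id(struct spi_device *spi, u8 *cmd, u8 *id)
 *	{
 *		struct spi_transfer t[2] = {
 *			{ .tx_buf = cmd, .len = 1, },
 *			{ .rx_buf = id,  .len = 3, },
 *		};
 *		struct spi_message m;
 *
 *		cmd[0] = 0x9f;
 *		spi_message_init(&m);
 *		spi_message_add_tail(&t[0], &m);
 *		spi_message_add_tail(&t[1], &m);
 *		return spi_sync(spi, &m);
 *	}
 */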

/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message, 1);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);

/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over. Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);

/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * It returns zero on success, else a negative error code.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
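
/*
 * A minimal sketch of exclusive bus usage: a driver that must issue several
 * messages back to back, without traffic to other devices on the same bus
 * slipping in between, brackets them with spi_bus_lock()/spi_bus_unlock()
 * and uses the _locked transfer variants while the lock is held.
 * mydev_atomic_sequence() is a hypothetical helper.
 *
 *	static int mydev_atomic_sequence(struct spi_device *spi,
 *					 struct spi_message *first,
 *					 struct spi_message *second)
 *	{
 *		struct spi_master *master = spi->master;
 *		int status;
 *
 *		spi_bus_lock(master);
 *		status = spi_sync_locked(spi, first);
 *		if (status == 0)
 *			status = spi_sync_locked(spi, second);
 *		spi_bus_unlock(master);
 *		return status;
 *	}
 */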

/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here (it's purely a convenience), but we can keep heap
	 * costs out of the hot path unless someone else is using the
	 * pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
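
/*
 * A minimal sketch of a small register read: because spi_write_then_read()
 * copies through its own buffer, the caller's buffers need not be DMA-safe.
 * The 0x80 "read" opcode and mydev_read_reg() are hypothetical.
 *
 *	static int mydev_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		u8 cmd = 0x80 | reg;
 *
 *		return spi_write_then_read(spi, &cmd, 1, val, 1);
 *	}
 */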

/*-------------------------------------------------------------------------*/

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;
	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later
 *
 * REVISIT only boardinfo really needs static linking. the rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);