/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>

#include "spi-pxa2xx.h"

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000

/*
 * For testing SSCR1 changes that require SSP restart, basically
 * everything except the service and interrupt enables. The PXA270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
 * list, but the PXA255 dev man says all bits without really meaning the
 * service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF	\
				| QUARK_X1000_SSCR1_EFWR	\
				| QUARK_X1000_SSCR1_RFT		\
				| QUARK_X1000_SSCR1_TFT		\
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define CE4100_SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| CE4100_SSCR1_RFT | CE4100_SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE	BIT(24)
#define LPSS_CS_CONTROL_SW_MODE			BIT(0)
#define LPSS_CS_CONTROL_CS_HIGH			BIT(1)
#define LPSS_CAPS_CS_EN_SHIFT			9
#define LPSS_CAPS_CS_EN_MASK			(0xf << LPSS_CAPS_CS_EN_SHIFT)

struct lpss_config {
	/* LPSS offset from drv_data->ioaddr */
	unsigned offset;
	/* Register offsets from drv_data->lpss_base or -1 */
	int reg_general;
	int reg_ssp;
	int reg_cs_ctrl;
	int reg_capabilities;
	/* FIFO thresholds */
	u32 rx_threshold;
	u32 tx_threshold_lo;
	u32 tx_threshold_hi;
	/* Chip select control */
	unsigned cs_sel_shift;
	unsigned cs_sel_mask;
	unsigned cs_num;
};

/* Keep these sorted with enum pxa_ssp_type */
static const struct lpss_config lpss_platforms[] = {
	{	/* LPSS_LPT_SSP */
		.offset = 0x800,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BYT_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BSW_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
		.cs_sel_shift = 2,
		.cs_sel_mask = 1 << 2,
		.cs_num = 2,
	},
	{	/* LPSS_SPT_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.reg_capabilities = -1,
		.rx_threshold = 1,
		.tx_threshold_lo = 32,
		.tx_threshold_hi = 56,
	},
	{	/* LPSS_BXT_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.reg_capabilities = 0xfc,
		.rx_threshold = 1,
		.tx_threshold_lo = 16,
		.tx_threshold_hi = 48,
		.cs_sel_shift = 8,
		.cs_sel_mask = 3 << 8,
	},
};

static inline const struct lpss_config
*lpss_get_config(const struct driver_data *drv_data)
{
	return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
}

static bool is_lpss_ssp(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_BSW_SSP:
	case LPSS_SPT_SSP:
	case LPSS_BXT_SSP:
		return true;
	default:
		return false;
	}
}

static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == QUARK_X1000_SSP;
}

static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return QUARK_X1000_SSCR1_CHANGE_MASK;
	case CE4100_SSP:
		return CE4100_SSCR1_CHANGE_MASK;
	default:
		return SSCR1_CHANGE_MASK;
	}
}

static u32
pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return RX_THRESH_QUARK_X1000_DFLT;
	case CE4100_SSP:
		return RX_THRESH_CE4100_DFLT;
	default:
		return RX_THRESH_DFLT;
	}
}

static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSSR_TFL_MASK;
		break;
	case CE4100_SSP:
		mask = CE4100_SSSR_TFL_MASK;
		break;
	default:
		mask = SSSR_TFL_MASK;
		break;
	}

	return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
}

static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
				     u32 *sccr1_reg)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSCR1_RFT;
		break;
	case CE4100_SSP:
		mask = CE4100_SSCR1_RFT;
		break;
	default:
		mask = SSCR1_RFT;
		break;
	}
	*sccr1_reg &= ~mask;
}

static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
				   u32 *sccr1_reg, u32 threshold)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
		break;
	case CE4100_SSP:
		*sccr1_reg |= CE4100_SSCR1_RxTresh(threshold);
		break;
	default:
		*sccr1_reg |= SSCR1_RxTresh(threshold);
		break;
	}
}

static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
				  u32 clk_div, u8 bits)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return clk_div
			| QUARK_X1000_SSCR0_Motorola
			| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
			| SSCR0_SSE;
	default:
		return clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}
}
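
/*
 * Illustrative example of pxa2xx_configure_sscr0(): on the non-Quark
 * path a 24 bits-per-word transfer programs SSCR0_DataSize(8) together
 * with SSCR0_EDSS, since EDSS extends the data size field by 16 bits.
 */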

/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}

static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}

/*
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);
	drv_data->lpss_base = drv_data->ioaddr + config->offset;

	/* Enable software chip select control */
	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
	value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->master_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);

		if (config->reg_general >= 0) {
			value = __lpss_ssp_read_priv(drv_data,
						     config->reg_general);
			value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
			__lpss_ssp_write_priv(drv_data,
					      config->reg_general, value);
		}
	}
}

static void lpss_ssp_select_cs(struct driver_data *drv_data,
			       const struct lpss_config *config)
{
	u32 value, cs;

	if (!config->cs_sel_mask)
		return;

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);

	cs = drv_data->master->cur_msg->spi->chip_select;
	cs <<= config->cs_sel_shift;
	if (cs != (value & config->cs_sel_mask)) {
		/*
		 * When switching another chip select output active the
		 * output must be selected first and wait 2 ssp_clk cycles
		 * before changing state to active. Otherwise a short
		 * glitch will occur on the previous chip select since
		 * output select is latched but state control is not.
		 */
		value &= ~config->cs_sel_mask;
		value |= cs;
		__lpss_ssp_write_priv(drv_data,
				      config->reg_cs_ctrl, value);
		ndelay(1000000000 /
		       (drv_data->master->max_speed_hz / 2));
	}
}

static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);

	if (enable)
		lpss_ssp_select_cs(drv_data, config);

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	if (enable)
		value &= ~LPSS_CS_CONTROL_CS_HIGH;
	else
		value |= LPSS_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->master->cur_msg->spi);

	if (drv_data->ssp_type == CE4100_SSP) {
		pxa2xx_spi_write(drv_data, SSSR, chip->frm);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, true);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->master->cur_msg->spi);

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, false);
}

int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	do {
		while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
			pxa2xx_spi_read(drv_data, SSDR);
	} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

static int null_writer(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, 0);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->master->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer* last_transfer;
	struct spi_message *msg;
	unsigned long timeout;

	msg = drv_data->master->cur_msg;
	drv_data->cur_transfer = NULL;

	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Wait until SSP becomes idle before deasserting the CS */
	timeout = jiffies + msecs_to_jiffies(10);
	while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
	       !time_after(jiffies, timeout))
		cpu_relax();

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if ((next_msg && next_msg->spi != msg->spi) ||
		    msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	spi_finalize_current_message(drv_data->master);
}

static void reset_sccr1(struct driver_data *drv_data)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->master->cur_msg->spi);
	u32 sccr1_reg;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
		break;
	case CE4100_SSP:
		sccr1_reg &= ~CE4100_SSCR1_RFT;
		break;
	default:
		sccr1_reg &= ~SSCR1_RFT;
		break;
	}
	sccr1_reg |= chip->threshold;
	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}

static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->master->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	/* Clear and disable interrupts */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	/* Update total bytes transferred with the number of bytes actually read */
	drv_data->master->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->master->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
		       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {
			u32 rx_thre;

			pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
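				/* Fall through: 4-byte words need two halvings */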
			case 2:
				bytes_left >>= 1;
			}

			rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
			if (rx_thre > bytes_left)
				rx_thre = bytes_left;

			pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
		}
		pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals so we must first
	 * check whether we are RPM suspended or not. If we are, we assume
	 * that the IRQ was not for us (we shouldn't be RPM suspended when
	 * the interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = pxa2xx_spi_read(drv_data, SSSR);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	/* Ignore RX timeout interrupt if it is disabled */
	if (!(sccr1_reg & SSCR1_TINTE))
		mask &= ~SSSR_TINT;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->master->cur_msg) {

		pxa2xx_spi_write(drv_data, SSCR0,
				 pxa2xx_spi_read(drv_data, SSCR0)
				 & ~SSCR0_SSE);
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->int_cr1);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

/*
 * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
 * input frequency by fractions of 2^24. It also has a divider by 5.
 *
 * There are formulas to get baud rate value for given input frequency and
 * divider parameters, such as DDS_CLK_RATE and SCR:
 *
 * Fsys = 200MHz
 *
 * Fssp = Fsys * DDS_CLK_RATE / 2^24			(1)
 * Baud rate = Fsclk = Fssp / (2 * (SCR + 1))		(2)
 *
 * DDS_CLK_RATE either 2^n or 2^n / 5.
 * SCR is in range 0 .. 255
 *
 * Divisor = 5^i * 2^j * 2 * k
 *       i = [0, 1]      i = 1 iff j = 0 or j > 3
 *       j = [0, 23]     j = 0 iff i = 1
 *       k = [1, 256]
 * Special case: j = 0, i = 1: Divisor = 2 / 5
 *
 * Accordingly to the specification the recommended values for DDS_CLK_RATE
 * are:
 *	Case 1:		2^n, n = [0, 23]
 *	Case 2:		2^24 * 2 / 5 (0x666666)
 *	Case 3:		less than or equal to 2^24 / 5 / 16 (0x33333)
 *
 * In all cases the lowest possible value is better.
 *
 * The function calculates parameters for all cases and chooses the one closest
 * to the asked baud rate.
 */
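/*
 * Worked example (illustrative only; the 5 MHz request below is an
 * arbitrary assumption): case 1 computes
 *	fref1 = 200 MHz / 2 / 2 = 50 MHz
 *	q1 = DIV_ROUND_UP(50 MHz, 5 MHz) = 10, mul = 2^23
 *	scale = __ffs(10) = 1, so q1 = 5 and mul = 2^22 (DDS_CLK_RATE)
 * giving SCR = q1 - 1 = 4 and an output rate of
 *	200 MHz * 2^22 / 2^24 / (2 * (4 + 1)) = 5 MHz exactly.
 */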
static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
	unsigned long xtal = 200000000;
	unsigned long fref = xtal / 2;		/* mandatory division by 2,
						   see (2) */
						/* case 3 */
	unsigned long fref1 = fref / 2;		/* case 1 */
	unsigned long fref2 = fref * 2 / 5;	/* case 2 */
	unsigned long scale;
	unsigned long q, q1, q2;
	long r, r1, r2;
	u32 mul;

	/* Case 1 */

	/* Set initial value for DDS_CLK_RATE */
	mul = (1 << 24) >> 1;

	/* Calculate initial quot */
	q1 = DIV_ROUND_UP(fref1, rate);

	/* Scale q1 if it's too big */
	if (q1 > 256) {
		/* Scale q1 to range [1, 512] */
		scale = fls_long(q1 - 1);
		if (scale > 9) {
			q1 >>= scale - 9;
			mul >>= scale - 9;
		}

		/* Round the result if we have a remainder */
		q1 += q1 & 1;
	}

	/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
	scale = __ffs(q1);
	q1 >>= scale;
	mul >>= scale;

	/* Get the remainder */
	r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);

	/* Case 2 */

	q2 = DIV_ROUND_UP(fref2, rate);
	r2 = abs(fref2 / q2 - rate);

	/*
	 * Choose the best between two: less remainder we have the better. We
	 * can't go case 2 if q2 is greater than 256 since SCR register can
	 * hold only values 0 .. 255.
	 */
	if (r2 >= r1 || q2 > 256) {
		/* case 1 is better */
		r = r1;
		q = q1;
	} else {
		/* case 2 is better */
		r = r2;
		q = q2;
		mul = (1 << 24) * 2 / 5;
	}

	/* Check case 3 only if the divisor is big enough */
	if (fref / rate >= 80) {
		u64 fssp;
		u32 m;

		/* Calculate initial quot */
		q1 = DIV_ROUND_UP(fref, rate);
		m = (1 << 24) / q1;

		/* Get the remainder */
		fssp = (u64)fref * m;
		do_div(fssp, 1 << 24);
		r1 = abs(fssp - rate);

		/* Choose this one if it suits better */
		if (r1 < r) {
			/* case 3 is better */
			q = 1;
			mul = m;
		}
	}

	*dds = mul;
	return q - 1;
}

static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->master->max_speed_hz;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return (ssp_clk / (2 * rate) - 1) & 0xff;
	else
		return (ssp_clk / rate - 1) & 0xfff;
}
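
/*
 * Example (illustrative; a 48 MHz SSP clock is assumed): a 1 MHz request
 * yields SCR = 48 - 1 = 47 on most ports (bit clock = ssp_clk / (SCR + 1)),
 * while PXA25x/CE4100 divide by 2 * (SCR + 1), so SCR = 23 there.
 */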

static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
					   int rate)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->master->cur_msg->spi);
	unsigned int clk_div;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
		break;
	default:
		clk_div = ssp_get_clk_div(drv_data, rate);
		break;
	}
	return clk_div << 8;
}

static bool pxa2xx_spi_can_dma(struct spi_master *master,
			       struct spi_device *spi,
			       struct spi_transfer *xfer)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	return chip->enable_dma &&
	       xfer->len <= MAX_DMA_LEN &&
	       xfer->len >= chip->dma_burst_size;
}

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_master *master = drv_data->master;
	struct spi_message *message = master->cur_msg;
	struct chip_data *chip = spi_get_ctldata(message->spi);
	u32 dma_thresh = chip->dma_threshold;
	u32 dma_burst = chip->dma_burst_size;
	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
	struct spi_transfer *transfer;
	struct spi_transfer *previous;
	u32 clk_div;
	u8 bits;
	u32 speed;
	u32 cr0;
	u32 cr1;
	int err;
	int dma_mapped;

	/* Get current state information */
	transfer = drv_data->cur_transfer;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of "
				"%u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		dev_warn_ratelimited(&message->spi->dev,
				     "pump_transfers: DMA disabled for transfer length %ld "
				     "greater than %d\n",
				     (long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bit per word on a per transfer */
	bits = transfer->bits_per_word;
	speed = transfer->speed_hz;

	clk_div = pxa2xx_ssp_get_clk_div(drv_data, speed);

	if (bits <= 8) {
		drv_data->n_bytes = 1;
		drv_data->read = drv_data->read != null_reader ?
					u8_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u8_writer : null_writer;
	} else if (bits <= 16) {
		drv_data->n_bytes = 2;
		drv_data->read = drv_data->read != null_reader ?
					u16_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u16_writer : null_writer;
	} else if (bits <= 32) {
		drv_data->n_bytes = 4;
		drv_data->read = drv_data->read != null_reader ?
					u32_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u32_writer : null_writer;
	}
	/*
	 * if bits/word is changed in dma mode, then must check the
	 * thresholds and burst also
	 */
	if (chip->enable_dma) {
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
						message->spi,
						bits, &dma_burst,
						&dma_thresh))
			dev_warn_ratelimited(&message->spi->dev,
					     "pump_transfers: DMA burst size reduced to match bits_per_word\n");
	}

	message->state = RUNNING_STATE;

	dma_mapped = master->can_dma &&
		     master->can_dma(master, message->spi, transfer) &&
		     master->cur_msg_mapped;
	if (dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		err = pxa2xx_spi_dma_prepare(drv_data, dma_burst);
		if (err) {
			message->status = err;
			giveback(drv_data);
			return;
		}

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler	*/
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status  */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
	cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
	if (!pxa25x_ssp_comp(drv_data))
		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
			master->max_speed_hz
				/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
			dma_mapped ? "DMA" : "PIO");
	else
		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
			master->max_speed_hz / 2
				/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			dma_mapped ? "DMA" : "PIO");

	if (is_lpss_ssp(drv_data)) {
		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
		    != chip->lpss_rx_threshold)
			pxa2xx_spi_write(drv_data, SSIRF,
					 chip->lpss_rx_threshold);
		if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
		    != chip->lpss_tx_threshold)
			pxa2xx_spi_write(drv_data, SSITF,
					 chip->lpss_tx_threshold);
	}

	if (is_quark_x1000_ssp(drv_data) &&
	    (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
		pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);

	/* see if we need to reload the config registers */
	if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
	    || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
	    != (cr1 & change_mask)) {
		/* stop the SSP, and update the other bits */
		pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
		/* first set CR1 without interrupt and service enables */
		pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
		/* restart the SSP */
		pxa2xx_spi_write(drv_data, SSCR0, cr0);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	pxa2xx_spi_write(drv_data, SSCR1, cr1);
}

static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Initial message state*/
	msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Disable the SSP now */
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	return 0;
}

static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	int err = 0;

	if (chip == NULL)
		return 0;

	if (drv_data->cs_gpiods) {
		struct gpio_desc *gpiod;

		gpiod = drv_data->cs_gpiods[spi->chip_select];
		if (gpiod) {
			chip->gpio_cs = desc_to_gpio(gpiod);
			chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
			gpiod_set_value(gpiod, chip->gpio_cs_inverted);
		}

		return 0;
	}

	if (chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info;
	struct chip_data *chip;
	const struct lpss_config *config;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	uint tx_thres, tx_hi_thres, rx_thres;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tx_thres = TX_THRESH_QUARK_X1000_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_QUARK_X1000_DFLT;
		break;
	case CE4100_SSP:
		tx_thres = TX_THRESH_CE4100_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_CE4100_DFLT;
		break;
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_BSW_SSP:
	case LPSS_SPT_SSP:
	case LPSS_BXT_SSP:
		config = lpss_get_config(drv_data);
		tx_thres = config->tx_threshold_lo;
		tx_hi_thres = config->tx_threshold_hi;
		rx_thres = config->rx_threshold;
		break;
	default:
		tx_thres = TX_THRESH_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_DFLT;
		break;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		} else
			chip->gpio_cs = -1;
		chip->enable_dma = drv_data->master_info->enable_dma;
		chip->timeout = TIMOUT_DFLT;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->tx_hi_threshold)
			tx_hi_thres = chip_info->tx_hi_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	}

	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
				| SSITF_TxHiThresh(tx_hi_thres);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev,
				 "in setup: DMA burst size reduced to match bits_per_word\n");
		}
	}

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
				   & QUARK_X1000_SSCR1_RFT)
				   | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
				   & QUARK_X1000_SSCR1_TFT);
		break;
	case CE4100_SSP:
		chip->threshold = (CE4100_SSCR1_RxTresh(rx_thres) & CE4100_SSCR1_RFT) |
			(CE4100_SSCR1_TxTresh(tx_thres) & CE4100_SSCR1_TFT);
		break;
	default:
		chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
		break;
	}

	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	}

	spi_set_ctldata(spi, chip);

	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

	return setup_cs(spi, chip, chip_info);
}

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
	    gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}

#ifdef CONFIG_PCI
#ifdef CONFIG_ACPI

static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ "INT33C0", LPSS_LPT_SSP },
	{ "INT33C1", LPSS_LPT_SSP },
	{ "INT3430", LPSS_LPT_SSP },
	{ "INT3431", LPSS_LPT_SSP },
	{ "80860F0E", LPSS_BYT_SSP },
	{ "8086228E", LPSS_BSW_SSP },
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
	unsigned int devid;
	int port_id = -1;

	if (adev && adev->pnp.unique_id &&
	    !kstrtouint(adev->pnp.unique_id, 0, &devid))
		port_id = devid;
	return port_id;
}
#else /* !CONFIG_ACPI */
static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
	return -1;
}
#endif

/*
 * PCI IDs of compound devices that integrate both host controller and private
 * integrated DMA engine. Please note these are not used in module
 * autoloading and probing in this module but matching the LPSS SSP type.
 */
static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
	/* SPT-LP */
	{ PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
	/* SPT-H */
	{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
	/* KBL-H */
	{ PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
	/* BXT A-Step */
	{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
	/* BXT B-Step */
	{ PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
	/* APL */
	{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
	{ },
};

static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
	struct device *dev = param;

	if (dev != chan->device->dev->parent)
		return false;

	return true;
}

static struct pxa2xx_spi_master *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_master *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
	const struct acpi_device_id *adev_id = NULL;
	const struct pci_device_id *pcidev_id = NULL;
	int type;

	adev = ACPI_COMPANION(&pdev->dev);

	if (dev_is_pci(pdev->dev.parent))
		pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
					 to_pci_dev(pdev->dev.parent));
	else if (adev)
		adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
	else
		return NULL;

	if (adev_id)
		type = (int)adev_id->driver_data;
	else if (pcidev_id)
		type = (int)pcidev_id->driver_data;
	else
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return NULL;

	if (pcidev_id) {
		pdata->tx_param = pdev->dev.parent;
		pdata->rx_param = pdev->dev.parent;
		pdata->dma_filter = pxa2xx_spi_idma_filter;
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
	ssp->type = type;
	ssp->pdev = pdev;
	ssp->port_id = pxa2xx_spi_get_port_id(adev);

	pdata->num_chipselect = 1;
	pdata->enable_dma = true;

	return pdata;
}

#else /* !CONFIG_PCI */
static inline struct pxa2xx_spi_master *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int pxa2xx_spi_fw_translate_cs(struct spi_master *master, unsigned cs)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	if (has_acpi_companion(&drv_data->pdev->dev)) {
		switch (drv_data->ssp_type) {
		/*
		 * For Atoms the ACPI DeviceSelection used by the Windows
		 * driver starts from 1 instead of 0 so translate it here
		 * to match what Linux expects.
		 */
		case LPSS_BYT_SSP:
		case LPSS_BSW_SSP:
			return cs - 1;

		default:
			break;
		}
	}

	return cs;
}

static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	const struct lpss_config *config;
	int status, count;
	u32 tmp;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		platform_info = pxa2xx_spi_init_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	master = spi_alloc_master(dev, sizeof(struct driver_data));
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	master->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

	master->bus_num = ssp->port_id;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
	master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
	master->auto_runtime_pm = true;
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;

	drv_data->ssp_type = ssp->type;

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		switch (drv_data->ssp_type) {
		case QUARK_X1000_SSP:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
			break;
		default:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
			break;
		}

		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_dbg(dev, "no DMA channels available, using PIO\n");
			platform_info->enable_dma = false;
		} else {
			master->can_dma = pxa2xx_spi_can_dma;
		}
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	master->max_speed_hz = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
		      QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);

		/* Use the Motorola SPI protocol and 8 bit frames */
		tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	case CE4100_SSP:
		tmp = CE4100_SSCR1_RxTresh(RX_THRESH_CE4100_DFLT) |
		      CE4100_SSCR1_TxTresh(TX_THRESH_CE4100_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	default:
		tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
		      SSCR1_TxTresh(TX_THRESH_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	}

	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	if (!is_quark_x1000_ssp(drv_data))
		pxa2xx_spi_write(drv_data, SSPSP, 0);

	if (is_lpss_ssp(drv_data)) {
		lpss_ssp_setup(drv_data);
		config = lpss_get_config(drv_data);
		if (config->reg_capabilities >= 0) {
			tmp = __lpss_ssp_read_priv(drv_data,
						   config->reg_capabilities);
			tmp &= LPSS_CAPS_CS_EN_MASK;
			tmp >>= LPSS_CAPS_CS_EN_SHIFT;
			platform_info->num_chipselect = ffz(tmp);
		} else if (config->cs_num) {
			platform_info->num_chipselect = config->cs_num;
		}
	}
	master->num_chipselect = platform_info->num_chipselect;

	count = gpiod_count(&pdev->dev, "cs");
	if (count > 0) {
		int i;

		master->num_chipselect = max_t(int, count,
			master->num_chipselect);

		drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
			master->num_chipselect, sizeof(struct gpio_desc *),
			GFP_KERNEL);
		if (!drv_data->cs_gpiods) {
			status = -ENOMEM;
			goto out_error_clock_enabled;
		}

		for (i = 0; i < master->num_chipselect; i++) {
			struct gpio_desc *gpiod;

			gpiod = devm_gpiod_get_index(dev, "cs", i,
						     GPIOD_OUT_HIGH);
			if (IS_ERR(gpiod)) {
				/* Means use native chip select */
				if (PTR_ERR(gpiod) == -ENOENT)
					continue;

				status = (int)PTR_ERR(gpiod);
				goto out_error_clock_enabled;
			} else {
				drv_data->cs_gpiods[i] = gpiod;
			}
		}
	}

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = devm_spi_register_master(&pdev->dev, master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	pxa2xx_spi_write(drv_data, SSCR0, 0);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status;

	/* Enable the SSP clock */
	if (!pm_runtime_suspended(dev))
		clk_prepare_enable(ssp->clk);

	/* Restore LPSS private register bits */
	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.pm	= &pxa2xx_spi_pm_ops,
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);