spi-pxa2xx.c 41.5 KB
Newer Older
1 2
/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
3
 * Copyright (C) 2013, Intel Corporation
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
21
#include <linux/err.h>
22
#include <linux/interrupt.h>
23
#include <linux/kernel.h>
24
#include <linux/platform_device.h>
25
#include <linux/spi/pxa2xx_spi.h>
26 27
#include <linux/spi/spi.h>
#include <linux/delay.h>
28
#include <linux/gpio.h>
29
#include <linux/slab.h>
30
#include <linux/clk.h>
31
#include <linux/pm_runtime.h>
32
#include <linux/acpi.h>
33

34
#include "spi-pxa2xx.h"
35 36

MODULE_AUTHOR("Stephen Street");
W
Will Newton 已提交
37
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
38
MODULE_LICENSE("GPL");
39
MODULE_ALIAS("platform:pxa2xx-spi");
40

41 42
/* Default transfer timeout, in milliseconds (note: historical misspelling) */
#define TIMOUT_DFLT		1000

43 44 45 46 47 48 49 50
/*
 * for testing SSCR1 changes that require SSP restart, basically
 * everything except the service and interrupt enables, the pxa270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH, SSCR1_SPO need to be in this
 * list, but the PXA255 dev man says all bits without really meaning the
 * service and interrupt enables
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
51
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
52 53 54 55
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
56

57 58 59 60 61 62
#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF	\
				| QUARK_X1000_SSCR1_EFWR	\
				| QUARK_X1000_SSCR1_RFT		\
				| QUARK_X1000_SSCR1_TFT		\
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

63
/* LPSS GENERAL private register: disable RX-timeout interrupt holdoff */
#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
/* LPSS CS_CTRL private register: software CS mode and CS line level */
#define SPI_CS_CONTROL_SW_MODE	BIT(0)
#define SPI_CS_CONTROL_CS_HIGH	BIT(1)

67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
/* Per-platform layout of the Intel LPSS SSP private register space */
struct lpss_config {
	/* LPSS offset from drv_data->ioaddr */
	unsigned offset;
	/* Register offsets from drv_data->lpss_base or -1 */
	int reg_general;
	int reg_ssp;
	int reg_cs_ctrl;
	/* FIFO thresholds */
	u32 rx_threshold;
	u32 tx_threshold_lo;
	u32 tx_threshold_hi;
};

/* Keep these sorted with enum pxa_ssp_type */
static const struct lpss_config lpss_platforms[] = {
	{	/* LPSS_LPT_SSP */
		.offset = 0x800,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BYT_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
};

/*
 * Look up the LPSS configuration for this port.  Valid only when
 * is_lpss_ssp() is true; relies on lpss_platforms[] being sorted to
 * match enum pxa_ssp_type starting at LPSS_LPT_SSP.
 */
static inline const struct lpss_config
*lpss_get_config(const struct driver_data *drv_data)
{
	return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
}

108 109
static bool is_lpss_ssp(const struct driver_data *drv_data)
{
110 111 112 113 114 115 116
	switch (drv_data->ssp_type) {
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
		return true;
	default:
		return false;
	}
117 118
}

119 120 121 122 123
/* True for the Intel Quark X1000 flavor of the SSP. */
static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == QUARK_X1000_SSP;
}

124 125 126
static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
127 128
	case QUARK_X1000_SSP:
		return QUARK_X1000_SSCR1_CHANGE_MASK;
129 130 131 132 133 134 135 136 137
	default:
		return SSCR1_CHANGE_MASK;
	}
}

static u32
pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
138 139
	case QUARK_X1000_SSP:
		return RX_THRESH_QUARK_X1000_DFLT;
140 141 142 143 144 145 146 147 148 149
	default:
		return RX_THRESH_DFLT;
	}
}

static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
	u32 mask;

	switch (drv_data->ssp_type) {
150 151 152
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSSR_TFL_MASK;
		break;
153 154 155 156 157
	default:
		mask = SSSR_TFL_MASK;
		break;
	}

158
	return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
159 160 161 162 163 164 165 166
}

/* Clear the RX FIFO threshold field in the cached SSCR1 value. */
static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
				     u32 *sccr1_reg)
{
	u32 rft_mask;

	if (drv_data->ssp_type == QUARK_X1000_SSP)
		rft_mask = QUARK_X1000_SSCR1_RFT;
	else
		rft_mask = SSCR1_RFT;

	*sccr1_reg &= ~rft_mask;
}

/* Encode @threshold into the RX FIFO threshold field of *sccr1_reg. */
static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
				   u32 *sccr1_reg, u32 threshold)
{
	if (drv_data->ssp_type == QUARK_X1000_SSP)
		*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
	else
		*sccr1_reg |= SSCR1_RxTresh(threshold);
}

/*
 * Build an SSCR0 value (Motorola frame format, port enabled) for the
 * given pre-shifted clock divider and word size.
 */
static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
				  u32 clk_div, u8 bits)
{
	if (drv_data->ssp_type == QUARK_X1000_SSP)
		return clk_div
			| QUARK_X1000_SSCR0_Motorola
			| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
			| SSCR0_SSE;

	/* word sizes above 16 bits need the extended-data-size bit */
	return clk_div
		| SSCR0_Motorola
		| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
		| SSCR0_SSE
		| (bits > 16 ? SSCR0_EDSS : 0);
}

208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);	/* lpss_ssp_setup() must run first */
	return readl(drv_data->lpss_base + offset);
}

static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);	/* lpss_ssp_setup() must run first */
	writel(value, drv_data->lpss_base + offset);
}

/*
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);
	/* All private-register accessors require lpss_base to be set */
	drv_data->lpss_base = drv_data->ioaddr + config->offset;

	/* Enable software chip select control */
	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->master_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);

		/* reg_general is -1 on platforms without that register */
		if (config->reg_general >= 0) {
			value = __lpss_ssp_read_priv(drv_data,
						     config->reg_general);
			value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
			__lpss_ssp_write_priv(drv_data,
					      config->reg_general, value);
		}
	}
}

/* Drive the LPSS software chip select; the line is active low. */
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	const struct lpss_config *config = lpss_get_config(drv_data);
	u32 cs_ctrl;

	cs_ctrl = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	if (enable)
		cs_ctrl &= ~SPI_CS_CONTROL_CS_HIGH;
	else
		cs_ctrl |= SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, cs_ctrl);
}

273 274 275 276
/*
 * Assert chip select for the current chip.  The mechanisms are tried in
 * priority order: CE4100 frame register, board cs_control() callback,
 * GPIO chip select, LPSS software chip select.
 */
static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	/* CE4100 selects the frame by writing it to SSSR */
	if (drv_data->ssp_type == CE4100_SSP) {
		pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
		return;
	}

	/* A board-supplied callback overrides any GPIO chip select */
	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, true);
}

/*
 * Deassert chip select for the current chip; mirror image of
 * cs_assert().  CE4100 needs no action here.
 */
static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, false);
}

317
/*
 * Drain the RX FIFO, wait for the port to go idle, then clear any
 * receiver-overrun status.  Returns 0 when the port stayed busy until
 * the loop limit expired (flush failed), non-zero on success.
 */
int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	do {
		while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
			pxa2xx_spi_read(drv_data, SSDR);
	} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

330
static int null_writer(struct driver_data *drv_data)
331
{
332
	u8 n_bytes = drv_data->n_bytes;
333

334
	if (pxa2xx_spi_txfifo_full(drv_data)
335 336 337
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

338
	pxa2xx_spi_write(drv_data, SSDR, 0);
339 340 341
	drv_data->tx += n_bytes;

	return 1;
342 343
}

344
static int null_reader(struct driver_data *drv_data)
345
{
346
	u8 n_bytes = drv_data->n_bytes;
347

348 349 350
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		pxa2xx_spi_read(drv_data, SSDR);
351 352
		drv_data->rx += n_bytes;
	}
353 354

	return drv_data->rx == drv_data->rx_end;
355 356
}

357
static int u8_writer(struct driver_data *drv_data)
358
{
359
	if (pxa2xx_spi_txfifo_full(drv_data)
360 361 362
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

363
	pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
364 365 366
	++drv_data->tx;

	return 1;
367 368
}

369
static int u8_reader(struct driver_data *drv_data)
370
{
371 372 373
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
374 375
		++drv_data->rx;
	}
376 377

	return drv_data->rx == drv_data->rx_end;
378 379
}

380
static int u16_writer(struct driver_data *drv_data)
381
{
382
	if (pxa2xx_spi_txfifo_full(drv_data)
383 384 385
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

386
	pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
387 388 389
	drv_data->tx += 2;

	return 1;
390 391
}

392
static int u16_reader(struct driver_data *drv_data)
393
{
394 395 396
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
397 398
		drv_data->rx += 2;
	}
399 400

	return drv_data->rx == drv_data->rx_end;
401
}
402 403

static int u32_writer(struct driver_data *drv_data)
404
{
405
	if (pxa2xx_spi_txfifo_full(drv_data)
406 407 408
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

409
	pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
410 411 412
	drv_data->tx += 4;

	return 1;
413 414
}

415
static int u32_reader(struct driver_data *drv_data)
416
{
417 418 419
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
420 421
		drv_data->rx += 4;
	}
422 423

	return drv_data->rx == drv_data->rx_end;
424 425
}

426
void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/* caller already set message->status; dma and pio irqs are blocked */
S
Stephen Street 已提交
443
/* Hand the finished message back to the SPI core and decide whether
 * chip select may stay asserted for the next message.
 */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer* last_transfer;
	struct spi_message *msg;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	drv_data->cur_chip = NULL;
	spi_finalize_current_message(drv_data->master);
}

493 494 495 496 497
static void reset_sccr1(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

498
	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
499 500
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
501
	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
502 503
}

504
/* Quiesce the port after an error, log @msg, and abort the message. */
static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	/* Let pump_transfers() observe the error and give the message back */
	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}
S
Stephen Street 已提交
520

521 522 523
/* PIO transfer finished: stop the port, account bytes, move on. */
static void int_transfer_complete(struct driver_data *drv_data)
{
	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	/* Update total byte transferred return count actual bytes read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}
543

544 545
/* PIO interrupt handler: service RX/TX FIFOs for the current transfer. */
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	/* Only consider TFS when transmit interrupts are still enabled */
	u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
		       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Receiver timeout: the remaining RX bytes have trickled in */
	if (irq_status & SSSR_TINT) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	/* All TX done: disable TX interrupts, wait only for remaining RX */
	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshould for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {
			u32 rx_thre;

			pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);

			/* convert remaining byte count to FIFO word count */
			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
				/* fall through */
			case 2:
				bytes_left >>= 1;
			}

			rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
			if (rx_thre > bytes_left)
				rx_thre = bytes_left;

			pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
		}
		pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

614
/* Top-level (possibly shared) IRQ handler: filter spurious interrupts
 * and dispatch to the PIO or DMA transfer handler.
 */
static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals so we must first
	 * check that are we RPM suspended or not. If we are we assume that
	 * the IRQ was not for us (we shouldn't be RPM suspended when the
	 * interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = pxa2xx_spi_read(drv_data, SSSR);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	/* Interrupt with no message in flight: silence the port */
	if (!drv_data->cur_msg) {

		pxa2xx_spi_write(drv_data, SSCR0,
				 pxa2xx_spi_read(drv_data, SSCR0)
				 & ~SSCR0_SSE);
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->int_cr1);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

671
/*
672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701
 * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
 * input frequency by fractions of 2^24. It also has a divider by 5.
 *
 * There are formulas to get baud rate value for given input frequency and
 * divider parameters, such as DDS_CLK_RATE and SCR:
 *
 * Fsys = 200MHz
 *
 * Fssp = Fsys * DDS_CLK_RATE / 2^24			(1)
 * Baud rate = Fsclk = Fssp / (2 * (SCR + 1))		(2)
 *
 * DDS_CLK_RATE either 2^n or 2^n / 5.
 * SCR is in range 0 .. 255
 *
 * Divisor = 5^i * 2^j * 2 * k
 *       i = [0, 1]      i = 1 iff j = 0 or j > 3
 *       j = [0, 23]     j = 0 iff i = 1
 *       k = [1, 256]
 * Special case: j = 0, i = 1: Divisor = 2 / 5
 *
 * Accordingly to the specification the recommended values for DDS_CLK_RATE
 * are:
 *	Case 1:		2^n, n = [0, 23]
 *	Case 2:		2^24 * 2 / 5 (0x666666)
 *	Case 3:		less than or equal to 2^24 / 5 / 16 (0x33333)
 *
 * In all cases the lowest possible value is better.
 *
 * The function calculates parameters for all cases and chooses the one closest
 * to the asked baud rate.
702
 */
703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730
/*
 * Returns the SCR value (q - 1) for the requested @rate and stores the
 * chosen DDS_CLK_RATE multiplier through @dds; see the commentary above
 * for the three cases being compared.
 */
static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
	unsigned long xtal = 200000000;
	unsigned long fref = xtal / 2;		/* mandatory division by 2,
						   see (2) */
						/* case 3 */
	unsigned long fref1 = fref / 2;		/* case 1 */
	unsigned long fref2 = fref * 2 / 5;	/* case 2 */
	unsigned long scale;
	unsigned long q, q1, q2;
	long r, r1, r2;
	u32 mul;

	/* Case 1 */

	/* Set initial value for DDS_CLK_RATE */
	mul = (1 << 24) >> 1;

	/* Calculate initial quot */
	q1 = DIV_ROUND_CLOSEST(fref1, rate);

	/* Scale q1 if it's too big */
	if (q1 > 256) {
		/* Scale q1 to range [1, 512] */
		scale = fls_long(q1 - 1);
		if (scale > 9) {
			q1 >>= scale - 9;
			mul >>= scale - 9;
		}

		/* Round the result if we have a remainder */
		q1 += q1 & 1;
	}

	/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
	scale = __ffs(q1);
	q1 >>= scale;
	mul >>= scale;

	/* Get the remainder */
	r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);

	/* Case 2 */

	q2 = DIV_ROUND_CLOSEST(fref2, rate);
	r2 = abs(fref2 / q2 - rate);

	/*
	 * Choose the best between two: less remainder we have the better. We
	 * can't go case 2 if q2 is greater than 256 since SCR register can
	 * hold only values 0 .. 255.
	 */
	if (r2 >= r1 || q2 > 256) {
		/* case 1 is better */
		r = r1;
		q = q1;
	} else {
		/* case 2 is better */
		r = r2;
		q = q2;
		mul = (1 << 24) * 2 / 5;
	}

	/* Check case 3 only If the divisor is big enough */
	if (fref / rate >= 80) {
		u64 fssp;
		u32 m;

		/* Calculate initial quot */
		q1 = DIV_ROUND_CLOSEST(fref, rate);
		m = (1 << 24) / q1;

		/* Get the remainder */
		fssp = (u64)fref * m;
		do_div(fssp, 1 << 24);
		r1 = abs(fssp - rate);

		/* Choose this one if it suits better */
		if (r1 < r) {
			/* case 3 is better */
			q = 1;
			mul = m;
		}
	}

	*dds = mul;
	return q - 1;
}

792
/*
 * Convert @rate (Hz) into the SSCR0 serial clock rate divider field.
 * PXA25x/CE4100 use an 8-bit field with an implicit extra divide-by-2;
 * later SSPs use a 12-bit field.
 */
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->max_clk_rate;
	const struct ssp_device *ssp = drv_data->ssp;

	/* the input clock is the ceiling for any requested rate */
	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return (ssp_clk / (2 * rate) - 1) & 0xff;
	else
		return (ssp_clk / rate - 1) & 0xfff;
}

805 806 807
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
					   struct chip_data *chip, int rate)
{
808
	unsigned int clk_div;
809 810 811

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
812
		clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
813
		break;
814
	default:
815
		clk_div = ssp_get_clk_div(drv_data, rate);
816
		break;
817
	}
818
	return clk_div << 8;
819 820
}

821 822 823 824 825 826 827
/*
 * Tasklet body driving the message state machine: finishes or aborts
 * the current message, or programs the SSP (speed, word size, DMA/PIO
 * mode) for the next transfer and kicks it off.
 */
static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;
	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of "
				"%u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		dev_warn_ratelimited(&message->spi->dev,
				     "pump_transfers: DMA disabled for transfer length %ld "
				     "greater than %d\n",
				     (long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bit per word on a per transfer */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);

		/* pick PIO word accessors matching the per-transfer width */
		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then must check the
		 * thresholds and burst also */
		if (chip->enable_dma) {
			if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
							message->spi,
							bits, &dma_burst,
							&dma_thresh))
				dev_warn_ratelimited(&message->spi->dev,
						     "pump_transfers: DMA burst size reduced to match bits_per_word\n");
		}

		cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
	}

	message->state = RUNNING_STATE;

	drv_data->dma_mapped = 0;
	if (pxa2xx_spi_dma_is_possible(drv_data->len))
		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		pxa2xx_spi_dma_prepare(drv_data, dma_burst);

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler	*/
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status  */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	/* LPSS FIFO thresholds live in extra registers; avoid rewrites */
	if (is_lpss_ssp(drv_data)) {
		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
		    != chip->lpss_rx_threshold)
			pxa2xx_spi_write(drv_data, SSIRF,
					 chip->lpss_rx_threshold);
		if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
		    != chip->lpss_tx_threshold)
			pxa2xx_spi_write(drv_data, SSITF,
					 chip->lpss_tx_threshold);
	}

	if (is_quark_x1000_ssp(drv_data) &&
	    (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
		pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);

	/* see if we need to reload the config registers */
	if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
	    || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
	    != (cr1 & change_mask)) {
		/* stop the SSP, and update the other bits */
		pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
		/* first set CR1 without interrupt and service enables */
		pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
		/* restart the SSP */
		pxa2xx_spi_write(drv_data, SSCR0, cr0);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	pxa2xx_spi_write(drv_data, SSCR1, cr1);
}

1023 1024
/* spi_master transfer_one_message hook: queue the message and defer all
 * the actual work to the pump_transfers tasklet.
 */
static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state*/
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

1044 1045 1046 1047 1048
static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Disable the SSP now */
1049 1050
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
1051 1052 1053 1054

	return 0;
}

1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077
/*
 * setup_cs() - configure the chip-select mechanism for one SPI device.
 * @spi: the slave device being set up
 * @chip: per-device runtime state (may be NULL on early failure paths)
 * @chip_info: board-supplied configuration (may be NULL for ACPI devices)
 *
 * A board may supply either a cs_control() callback or a GPIO number for
 * chip select; the callback takes precedence.  Returns 0 on success or the
 * negative errno from the GPIO calls.
 */
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		/* park the line in the deasserted state */
		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

1093 1094 1095 1096
static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
1097
	const struct lpss_config *config;
1098 1099
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned int clk_div;
1100 1101
	uint tx_thres, tx_hi_thres, rx_thres;

1102 1103 1104 1105 1106 1107
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tx_thres = TX_THRESH_QUARK_X1000_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_QUARK_X1000_DFLT;
		break;
1108 1109
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
1110 1111 1112 1113
		config = lpss_get_config(drv_data);
		tx_thres = config->tx_threshold_lo;
		tx_hi_thres = config->tx_threshold_hi;
		rx_thres = config->rx_threshold;
1114 1115
		break;
	default:
1116 1117 1118
		tx_thres = TX_THRESH_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_DFLT;
1119
		break;
1120
	}
1121

1122
	/* Only alloc on first setup */
1123
	chip = spi_get_ctldata(spi);
1124
	if (!chip) {
1125
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1126
		if (!chip)
1127 1128
			return -ENOMEM;

1129 1130
		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
1131 1132
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
1133 1134 1135 1136 1137 1138 1139
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		} else
			chip->gpio_cs = -1;
1140
		chip->enable_dma = 0;
1141
		chip->timeout = TIMOUT_DFLT;
1142 1143
	}

1144 1145 1146 1147
	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

1148
	/* chip_info isn't always needed */
1149
	chip->cr1 = 0;
1150
	if (chip_info) {
1151 1152 1153 1154
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
1155 1156
		if (chip_info->tx_hi_threshold)
			tx_hi_thres = chip_info->tx_hi_threshold;
1157 1158 1159
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->enable_dma = drv_data->master_info->enable_dma;
1160 1161 1162
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
1163 1164 1165 1166 1167 1168 1169
	} else if (ACPI_HANDLE(&spi->dev)) {
		/*
		 * Slave devices enumerated from ACPI namespace don't
		 * usually have chip_info but we still might want to use
		 * DMA with them.
		 */
		chip->enable_dma = drv_data->master_info->enable_dma;
1170 1171
	}

1172 1173 1174 1175
	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
				| SSITF_TxHiThresh(tx_hi_thres);

1176 1177 1178 1179 1180
	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
1181 1182
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
1183 1184
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
1185 1186
			dev_warn(&spi->dev,
				 "in setup: DMA burst size reduced to match bits_per_word\n");
1187 1188 1189
		}
	}

1190
	clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
1191
	chip->speed_hz = spi->max_speed_hz;
1192

1193 1194
	chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
					   spi->bits_per_word);
1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
				   & QUARK_X1000_SSCR1_RFT)
				   | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
				   & QUARK_X1000_SSCR1_TFT);
		break;
	default:
		chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
		break;
	}

1208 1209 1210
	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
1211

1212 1213 1214
	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;

1215
	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
1216
	if (!pxa25x_ssp_comp(drv_data))
1217
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1218
			drv_data->max_clk_rate
1219 1220
				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");
1221
	else
1222
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1223
			drv_data->max_clk_rate / 2
1224 1225
				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");
1226 1227 1228 1229 1230 1231 1232 1233 1234 1235

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
1236 1237
		if (!is_quark_x1000_ssp(drv_data))
			chip->cr0 |= SSCR0_EDSS;
1238 1239 1240 1241
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	}
1242
	chip->bits_per_word = spi->bits_per_word;
1243 1244 1245

	spi_set_ctldata(spi, chip);

1246 1247 1248
	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

1249
	return setup_cs(spi, chip, chip_info);
1250 1251
}

1252
static void cleanup(struct spi_device *spi)
1253
{
1254
	struct chip_data *chip = spi_get_ctldata(spi);
1255
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1256

1257 1258 1259
	if (!chip)
		return;

1260
	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
1261 1262
		gpio_free(chip->gpio_cs);

1263 1264 1265
	kfree(chip);
}

1266
#ifdef CONFIG_ACPI

/* ACPI IDs of the supported Intel LPSS SSP instances; driver_data is the
 * SSP type fed into ssp->type by pxa2xx_spi_acpi_get_pdata(). */
static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ "INT33C0", LPSS_LPT_SSP },
	{ "INT33C1", LPSS_LPT_SSP },
	{ "INT3430", LPSS_LPT_SSP },
	{ "INT3431", LPSS_LPT_SSP },
	{ "80860F0E", LPSS_BYT_SSP },
	{ "8086228E", LPSS_BYT_SSP },
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

1279 1280 1281 1282 1283 1284 1285
static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_master *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
1286 1287
	const struct acpi_device_id *id;
	int devid, type;
1288 1289 1290 1291 1292

	if (!ACPI_HANDLE(&pdev->dev) ||
	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return NULL;

1293 1294 1295 1296 1297 1298
	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
	if (id)
		type = (int)id->driver_data;
	else
		return NULL;

1299
	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1300
	if (!pdata)
1301 1302 1303 1304 1305 1306 1307 1308 1309
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
1310 1311
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
1312
		return NULL;
1313 1314 1315

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
1316
	ssp->type = type;
1317 1318 1319 1320 1321 1322 1323
	ssp->pdev = pdev;

	ssp->port_id = -1;
	if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
		ssp->port_id = devid;

	pdata->num_chipselect = 1;
1324
	pdata->enable_dma = true;
1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336

	return pdata;
}

#else
/* !CONFIG_ACPI: probe() must rely on board-supplied platform data. */
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	return NULL;
}
#endif

1337
static int pxa2xx_spi_probe(struct platform_device *pdev)
1338 1339 1340 1341
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
G
Guennadi Liakhovetski 已提交
1342
	struct driver_data *drv_data;
1343
	struct ssp_device *ssp;
G
Guennadi Liakhovetski 已提交
1344
	int status;
1345
	u32 tmp;
1346

1347 1348
	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
1349 1350 1351 1352 1353
		platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
1354
	}
1355

H
Haojian Zhuang 已提交
1356
	ssp = pxa_ssp_request(pdev->id, pdev->name);
1357 1358 1359 1360 1361
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
1362 1363 1364 1365 1366 1367
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
G
Guennadi Liakhovetski 已提交
1368
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
H
Haojian Zhuang 已提交
1369
		pxa_ssp_free(ssp);
1370 1371 1372 1373 1374 1375
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
1376
	drv_data->ssp = ssp;
1377

1378 1379
	master->dev.parent = &pdev->dev;
	master->dev.of_node = pdev->dev.of_node;
1380
	/* the spi->mode bits understood by this driver: */
1381
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1382

1383
	master->bus_num = ssp->port_id;
1384
	master->num_chipselect = platform_info->num_chipselect;
1385
	master->dma_alignment = DMA_ALIGNMENT;
1386 1387
	master->cleanup = cleanup;
	master->setup = setup;
1388
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
1389
	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
1390
	master->auto_runtime_pm = true;
1391

1392
	drv_data->ssp_type = ssp->type;
1393
	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
1394

1395 1396
	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
1397
	if (pxa25x_ssp_comp(drv_data)) {
1398 1399 1400 1401 1402 1403 1404 1405 1406
		switch (drv_data->ssp_type) {
		case QUARK_X1000_SSP:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
			break;
		default:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
			break;
		}

1407 1408 1409 1410 1411
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
1412
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1413
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1414
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
1415 1416 1417 1418
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

1419 1420
	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
1421
	if (status < 0) {
G
Guennadi Liakhovetski 已提交
1422
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
1423 1424 1425 1426 1427 1428 1429
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
1430 1431
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
1432
			dev_dbg(dev, "no DMA channels available, using PIO\n");
1433
			platform_info->enable_dma = false;
1434 1435 1436 1437
		}
	}

	/* Enable SOC clock */
1438 1439 1440
	clk_prepare_enable(ssp->clk);

	drv_data->max_clk_rate = clk_get_rate(ssp->clk);
1441 1442

	/* Load default SSP configuration */
1443
	pxa2xx_spi_write(drv_data, SSCR0, 0);
1444 1445
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
1446 1447 1448
		tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
		      | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
1449 1450

		/* using the Motorola SPI protocol and use 8 bit frame */
1451 1452 1453
		pxa2xx_spi_write(drv_data, SSCR0,
				 QUARK_X1000_SSCR0_Motorola
				 | QUARK_X1000_SSCR0_DataSize(8));
1454 1455
		break;
	default:
1456 1457 1458 1459 1460
		tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
		      SSCR1_TxTresh(TX_THRESH_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
1461 1462 1463
		break;
	}

1464
	if (!pxa25x_ssp_comp(drv_data))
1465
		pxa2xx_spi_write(drv_data, SSTO, 0);
1466 1467

	if (!is_quark_x1000_ssp(drv_data))
1468
		pxa2xx_spi_write(drv_data, SSPSP, 0);
1469

1470 1471
	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);
1472

1473 1474
	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);
1475

1476 1477 1478 1479 1480
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

1481 1482
	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
1483
	status = devm_spi_register_master(&pdev->dev, master);
1484 1485
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
1486
		goto out_error_clock_enabled;
1487 1488 1489 1490 1491
	}

	return status;

out_error_clock_enabled:
1492
	clk_disable_unprepare(ssp->clk);
1493
	pxa2xx_spi_dma_release(drv_data);
1494
	free_irq(ssp->irq, drv_data);
1495 1496 1497

out_error_master_alloc:
	spi_master_put(master);
H
Haojian Zhuang 已提交
1498
	pxa_ssp_free(ssp);
1499 1500 1501 1502 1503 1504
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
1505
	struct ssp_device *ssp;
1506 1507 1508

	if (!drv_data)
		return 0;
1509
	ssp = drv_data->ssp;
1510

1511 1512
	pm_runtime_get_sync(&pdev->dev);

1513
	/* Disable the SSP at the peripheral and SOC level */
1514
	pxa2xx_spi_write(drv_data, SSCR0, 0);
1515
	clk_disable_unprepare(ssp->clk);
1516 1517

	/* Release DMA */
1518 1519
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);
1520

1521 1522 1523
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

1524
	/* Release IRQ */
1525 1526 1527
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
H
Haojian Zhuang 已提交
1528
	pxa_ssp_free(ssp);
1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540

	return 0;
}

/*
 * pxa2xx_spi_shutdown() - platform shutdown hook; reuses the remove path
 * to quiesce the controller and logs any failure.
 */
static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status;

	/* kernel style: no assignment inside the if condition */
	status = pxa2xx_spi_remove(pdev);
	if (status != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

1541
#ifdef CONFIG_PM_SLEEP
/*
 * pxa2xx_spi_suspend() - system sleep hook: stop the message queue,
 * disable the SSP and gate its clock (unless runtime PM already did).
 */
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	pxa2xx_spi_write(drv_data, SSCR0, 0);

	/* runtime-suspended devices already have the clock gated */
	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(ssp->clk);

	return 0;
}

1559
static int pxa2xx_spi_resume(struct device *dev)
1560
{
1561
	struct driver_data *drv_data = dev_get_drvdata(dev);
1562
	struct ssp_device *ssp = drv_data->ssp;
1563 1564
	int status = 0;

1565
	pxa2xx_spi_dma_resume(drv_data);
1566

1567
	/* Enable the SSP clock */
1568 1569
	if (!pm_runtime_suspended(dev))
		clk_prepare_enable(ssp->clk);
1570

1571
	/* Restore LPSS private register bits */
1572 1573
	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);
1574

1575
	/* Start the queue running */
1576
	status = spi_master_resume(drv_data->master);
1577
	if (status != 0) {
1578
		dev_err(dev, "problem starting queue (%d)\n", status);
1579 1580 1581 1582 1583
		return status;
	}

	return 0;
}
1584 1585
#endif

1586
#ifdef CONFIG_PM
1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif
1603

1604
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
1605 1606 1607
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
1608
};
1609 1610 1611

static struct platform_driver driver = {
	.driver = {
1612 1613
		.name	= "pxa2xx-spi",
		.pm	= &pxa2xx_spi_pm_ops,
1614
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
1615
	},
1616
	.probe = pxa2xx_spi_probe,
1617
	.remove = pxa2xx_spi_remove,
1618 1619 1620 1621 1622
	.shutdown = pxa2xx_spi_shutdown,
};

/* Module init: register the platform driver (run at subsys_initcall time). */
static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);
1626 1627 1628 1629 1630 1631

/* Module exit: unregister the platform driver. */
static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);