/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>

#include "spi-pxa2xx.h"

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000

/*
 * For testing SSCR1 changes that require an SSP restart: basically
 * everything except the service and interrupt enables. The PXA270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this
 * list, but the PXA255 developer manual lists all bits without really
 * meaning the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF	\
				| QUARK_X1000_SSCR1_EFWR	\
				| QUARK_X1000_SSCR1_RFT		\
				| QUARK_X1000_SSCR1_TFT		\
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
#define SPI_CS_CONTROL_SW_MODE	BIT(0)
#define SPI_CS_CONTROL_CS_HIGH	BIT(1)

struct lpss_config {
	/* LPSS offset from drv_data->ioaddr */
	unsigned offset;
	/* Register offsets from drv_data->lpss_base or -1 */
	int reg_general;
	int reg_ssp;
	int reg_cs_ctrl;
	/* FIFO thresholds */
	u32 rx_threshold;
	u32 tx_threshold_lo;
	u32 tx_threshold_hi;
};

/* Keep these sorted with enum pxa_ssp_type */
static const struct lpss_config lpss_platforms[] = {
	{	/* LPSS_LPT_SSP */
		.offset = 0x800,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BYT_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_SPT_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.rx_threshold = 1,
		.tx_threshold_lo = 32,
		.tx_threshold_hi = 56,
	},
};

static inline const struct lpss_config
*lpss_get_config(const struct driver_data *drv_data)
{
	return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
}

static bool is_lpss_ssp(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_SPT_SSP:
		return true;
	default:
		return false;
	}
}

static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == QUARK_X1000_SSP;
}

static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return QUARK_X1000_SSCR1_CHANGE_MASK;
	default:
		return SSCR1_CHANGE_MASK;
	}
}

static u32
pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return RX_THRESH_QUARK_X1000_DFLT;
	default:
		return RX_THRESH_DFLT;
	}
}

static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSSR_TFL_MASK;
		break;
	default:
		mask = SSSR_TFL_MASK;
		break;
	}

	return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
}

static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
				     u32 *sccr1_reg)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSCR1_RFT;
		break;
	default:
		mask = SSCR1_RFT;
		break;
	}
	*sccr1_reg &= ~mask;
}

static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
				   u32 *sccr1_reg, u32 threshold)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
		break;
	default:
		*sccr1_reg |= SSCR1_RxTresh(threshold);
		break;
	}
}

static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
				  u32 clk_div, u8 bits)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return clk_div
			| QUARK_X1000_SSCR0_Motorola
			| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
			| SSCR0_SSE;
	default:
		return clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}
}

/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}

static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}

/*
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);
	drv_data->lpss_base = drv_data->ioaddr + config->offset;

	/* Enable software chip select control */
	value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->master_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);

		if (config->reg_general >= 0) {
			value = __lpss_ssp_read_priv(drv_data,
						     config->reg_general);
			value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
			__lpss_ssp_write_priv(drv_data,
					      config->reg_general, value);
		}
	}
}

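/*
 * Software chip select control for LPSS SSPs: clearing the
 * SPI_CS_CONTROL_CS_HIGH bit drives the (active low) chip select line low,
 * i.e. asserts it; setting the bit deasserts it.
 */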
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	if (enable)
		value &= ~SPI_CS_CONTROL_CS_HIGH;
	else
		value |= SPI_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, true);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, false);
}

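/*
 * Drain the receive FIFO and wait for the SSP to go idle, then clear a
 * pending receive overrun. Returns zero if the port was still busy when the
 * loop limit expired, non-zero otherwise.
 */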
int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	do {
		while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
			pxa2xx_spi_read(drv_data, SSDR);
	} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

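/*
 * PIO fill/drain helpers: each *_writer pushes one FIFO entry per call and
 * returns 0 once the TX FIFO is full or the buffer is exhausted, while each
 * *_reader drains the RX FIFO and returns true when the whole transfer has
 * been received. The null_* variants handle transfers without a TX or RX
 * buffer.
 */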
static int null_writer(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, 0);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer* last_transfer;
	struct spi_message *msg;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	drv_data->cur_chip = NULL;
	spi_finalize_current_message(drv_data->master);
}

static void reset_sccr1(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}

static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	/* Update total bytes transferred; count only the bytes actually read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
		       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {
			u32 rx_thre;

			pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
			case 2:
				bytes_left >>= 1;
			}

			rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
			if (rx_thre > bytes_left)
				rx_thre = bytes_left;

			pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
		}
		pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals so we must first
	 * check that are we RPM suspended or not. If we are we assume that
	 * the IRQ was not for us (we shouldn't be RPM suspended when the
	 * interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = pxa2xx_spi_read(drv_data, SSSR);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		pxa2xx_spi_write(drv_data, SSCR0,
				 pxa2xx_spi_read(drv_data, SSCR0)
				 & ~SSCR0_SSE);
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->int_cr1);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

/*
 * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
 * input frequency by fractions of 2^24. It also has a divider by 5.
 *
 * There are formulas to get baud rate value for given input frequency and
 * divider parameters, such as DDS_CLK_RATE and SCR:
 *
 * Fsys = 200MHz
 *
 * Fssp = Fsys * DDS_CLK_RATE / 2^24			(1)
 * Baud rate = Fsclk = Fssp / (2 * (SCR + 1))		(2)
 *
 * DDS_CLK_RATE either 2^n or 2^n / 5.
 * SCR is in range 0 .. 255
 *
 * Divisor = 5^i * 2^j * 2 * k
 *       i = [0, 1]      i = 1 iff j = 0 or j > 3
 *       j = [0, 23]     j = 0 iff i = 1
 *       k = [1, 256]
 * Special case: j = 0, i = 1: Divisor = 2 / 5
 *
 * According to the specification the recommended values for DDS_CLK_RATE
 * are:
 *	Case 1:		2^n, n = [0, 23]
 *	Case 2:		2^24 * 2 / 5 (0x666666)
 *	Case 3:		less than or equal to 2^24 / 5 / 16 (0x33333)
 *
 * In all cases the lowest possible value is better.
 *
 * The function calculates parameters for all cases and chooses the one closest
 * to the asked baud rate.
 */
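/*
 * Worked example (illustrative): for a requested rate of 1 MHz the code
 * below ends up in case 1 with DDS_CLK_RATE = 0x400000 and SCR = 24, so
 * Fssp = 200 MHz * 0x400000 / 2^24 = 50 MHz and the resulting baud rate is
 * 50 MHz / (2 * (24 + 1)) = 1 MHz exactly.
 */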
static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
	unsigned long xtal = 200000000;
	unsigned long fref = xtal / 2;		/* mandatory division by 2,
						   see (2) */
						/* case 3 */
	unsigned long fref1 = fref / 2;		/* case 1 */
	unsigned long fref2 = fref * 2 / 5;	/* case 2 */
	unsigned long scale;
	unsigned long q, q1, q2;
	long r, r1, r2;
	u32 mul;

	/* Case 1 */

	/* Set initial value for DDS_CLK_RATE */
	mul = (1 << 24) >> 1;

	/* Calculate initial quot */
	q1 = DIV_ROUND_CLOSEST(fref1, rate);

	/* Scale q1 if it's too big */
	if (q1 > 256) {
		/* Scale q1 to range [1, 512] */
		scale = fls_long(q1 - 1);
		if (scale > 9) {
			q1 >>= scale - 9;
			mul >>= scale - 9;
		}

		/* Round the result if we have a remainder */
		q1 += q1 & 1;
	}

	/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
	scale = __ffs(q1);
	q1 >>= scale;
	mul >>= scale;

	/* Get the remainder */
	r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);

	/* Case 2 */

	q2 = DIV_ROUND_CLOSEST(fref2, rate);
	r2 = abs(fref2 / q2 - rate);

	/*
	 * Choose the better of the two: the smaller the remainder, the better.
	 * We can't use case 2 if q2 is greater than 256 since the SCR register
	 * can hold only values 0 .. 255.
	 */
	if (r2 >= r1 || q2 > 256) {
		/* case 1 is better */
		r = r1;
		q = q1;
	} else {
		/* case 2 is better */
		r = r2;
		q = q2;
		mul = (1 << 24) * 2 / 5;
	}

	/* Check case 3 only if the divisor is big enough */
	if (fref / rate >= 80) {
		u64 fssp;
		u32 m;

		/* Calculate initial quot */
		q1 = DIV_ROUND_CLOSEST(fref, rate);
		m = (1 << 24) / q1;

		/* Get the remainder */
		fssp = (u64)fref * m;
		do_div(fssp, 1 << 24);
		r1 = abs(fssp - rate);

		/* Choose this one if it suits better */
		if (r1 < r) {
			/* case 3 is better */
			q = 1;
			mul = m;
		}
	}

	*dds = mul;
	return q - 1;
}

static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->max_clk_rate;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return (ssp_clk / (2 * rate) - 1) & 0xff;
	else
		return (ssp_clk / rate - 1) & 0xfff;
}

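/*
 * Note: the divider returned below is pre-shifted into the SSCR0 SCR field
 * position (bit 8 and up), ready to be OR'd into the control register by
 * pxa2xx_configure_sscr0().
 */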
static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
					   struct chip_data *chip, int rate)
{
	unsigned int clk_div;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
		break;
	default:
		clk_div = ssp_get_clk_div(drv_data, rate);
		break;
	}
	return clk_div << 8;
}

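/*
 * pump_transfers() runs in tasklet context: it sets up the SSP for the
 * current spi_transfer, picks DMA or PIO, asserts chip select and finally
 * enables service requests/interrupts to kick off the transfer.
 */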
static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;
	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of "
				"%u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		dev_warn_ratelimited(&message->spi->dev,
				     "pump_transfers: DMA disabled for transfer length %ld "
				     "greater than %d\n",
				     (long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bits per word on a per-transfer basis */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then must check the
		 * thresholds and burst also */
		if (chip->enable_dma) {
			if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
							message->spi,
							bits, &dma_burst,
							&dma_thresh))
				dev_warn_ratelimited(&message->spi->dev,
						     "pump_transfers: DMA burst size reduced to match bits_per_word\n");
		}

		cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
	}

	message->state = RUNNING_STATE;

	drv_data->dma_mapped = 0;
	if (pxa2xx_spi_dma_is_possible(drv_data->len))
		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		pxa2xx_spi_dma_prepare(drv_data, dma_burst);

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	if (is_lpss_ssp(drv_data)) {
		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
		    != chip->lpss_rx_threshold)
			pxa2xx_spi_write(drv_data, SSIRF,
					 chip->lpss_rx_threshold);
		if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
		    != chip->lpss_tx_threshold)
			pxa2xx_spi_write(drv_data, SSITF,
					 chip->lpss_tx_threshold);
	}

	if (is_quark_x1000_ssp(drv_data) &&
	    (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
		pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);

	/* see if we need to reload the config registers */
	if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
	    || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
	    != (cr1 & change_mask)) {
		/* stop the SSP, and update the other bits */
		pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
		/* first set CR1 without interrupt and service enables */
		pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
		/* restart the SSP */
		pxa2xx_spi_write(drv_data, SSCR0, cr0);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	pxa2xx_spi_write(drv_data, SSCR1, cr1);
}

static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state*/
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Disable the SSP now */
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	return 0;
}

static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

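/*
 * setup() may be called several times for the same device, so the per-chip
 * state is allocated only on the first call; clock divider, thresholds and
 * mode bits are re-derived from the spi_device settings on every call.
 */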
static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	const struct lpss_config *config;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned int clk_div;
	uint tx_thres, tx_hi_thres, rx_thres;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tx_thres = TX_THRESH_QUARK_X1000_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_QUARK_X1000_DFLT;
		break;
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_SPT_SSP:
		config = lpss_get_config(drv_data);
		tx_thres = config->tx_threshold_lo;
		tx_hi_thres = config->tx_threshold_hi;
		rx_thres = config->rx_threshold;
		break;
	default:
		tx_thres = TX_THRESH_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_DFLT;
		break;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		} else
			chip->gpio_cs = -1;
		chip->enable_dma = 0;
		chip->timeout = TIMOUT_DFLT;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->tx_hi_threshold)
			tx_hi_thres = chip_info->tx_hi_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->enable_dma = drv_data->master_info->enable_dma;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	} else if (ACPI_HANDLE(&spi->dev)) {
		/*
		 * Slave devices enumerated from ACPI namespace don't
		 * usually have chip_info but we still might want to use
		 * DMA with them.
		 */
		chip->enable_dma = drv_data->master_info->enable_dma;
	}

	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
				| SSITF_TxHiThresh(tx_hi_thres);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev,
				 "in setup: DMA burst size reduced to match bits_per_word\n");
		}
	}

	clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
	chip->speed_hz = spi->max_speed_hz;

	chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
					   spi->bits_per_word);
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
				   & QUARK_X1000_SSCR1_RFT)
				   | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
				   & QUARK_X1000_SSCR1_TFT);
		break;
	default:
		chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
		break;
	}

	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;

	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
	if (!pxa25x_ssp_comp(drv_data))
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate
				/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");
	else
		dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
			drv_data->max_clk_rate / 2
				/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			chip->enable_dma ? "DMA" : "PIO");

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		if (!is_quark_x1000_ssp(drv_data))
			chip->cr0 |= SSCR0_EDSS;
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	}
	chip->bits_per_word = spi->bits_per_word;

	spi_set_ctldata(spi, chip);

	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

	return setup_cs(spi, chip, chip_info);
}

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}

#ifdef CONFIG_ACPI

static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ "INT33C0", LPSS_LPT_SSP },
	{ "INT33C1", LPSS_LPT_SSP },
	{ "INT3430", LPSS_LPT_SSP },
	{ "INT3431", LPSS_LPT_SSP },
	{ "80860F0E", LPSS_BYT_SSP },
	{ "8086228E", LPSS_BYT_SSP },
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

/*
 * PCI IDs of compound devices that integrate both host controller and private
 * integrated DMA engine. Please note these are not used in module
 * autoloading and probing in this module but matching the LPSS SSP type.
 */
static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
	/* SPT-LP */
	{ PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
	/* SPT-H */
	{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
	{ },
};

static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
	struct device *dev = param;

	if (dev != chan->device->dev->parent)
		return false;

	return true;
}

static struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_master *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
	const struct acpi_device_id *adev_id = NULL;
	const struct pci_device_id *pcidev_id = NULL;
	int devid, type;

	if (!ACPI_HANDLE(&pdev->dev) ||
	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return NULL;

	if (dev_is_pci(pdev->dev.parent))
		pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
					 to_pci_dev(pdev->dev.parent));
	else
		adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);

	if (adev_id)
		type = (int)adev_id->driver_data;
	else if (pcidev_id)
		type = (int)pcidev_id->driver_data;
	else
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return NULL;

	if (pcidev_id) {
		pdata->tx_param = pdev->dev.parent;
		pdata->rx_param = pdev->dev.parent;
		pdata->dma_filter = pxa2xx_spi_idma_filter;
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
	ssp->type = type;
	ssp->pdev = pdev;

	ssp->port_id = -1;
	if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
		ssp->port_id = devid;

	pdata->num_chipselect = 1;
	pdata->enable_dma = true;

	return pdata;
}

#else
static inline struct pxa2xx_spi_master *
pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	int status;
	u32 tmp;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
1414 1415 1416 1417 1418 1419
		return -ENODEV;
	}

	/* Allocate master with space for drv_data and null dma buffer */
	master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
	if (!master) {
G
Guennadi Liakhovetski 已提交
1420
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
H
Haojian Zhuang 已提交
1421
		pxa_ssp_free(ssp);
1422 1423 1424 1425 1426 1427
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	master->dev.parent = &pdev->dev;
	master->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

	master->bus_num = ssp->port_id;
	master->num_chipselect = platform_info->num_chipselect;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
	master->auto_runtime_pm = true;

	drv_data->ssp_type = ssp->type;
	drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		switch (drv_data->ssp_type) {
		case QUARK_X1000_SSP:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
			break;
		default:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
			break;
		}

		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_dbg(dev, "no DMA channels available, using PIO\n");
			platform_info->enable_dma = false;
		}
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	drv_data->max_clk_rate = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
		      | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);

		/* use the Motorola SPI protocol and 8-bit frames */
		pxa2xx_spi_write(drv_data, SSCR0,
				 QUARK_X1000_SSCR0_Motorola
				 | QUARK_X1000_SSCR0_DataSize(8));
		break;
	default:
		tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
		      SSCR1_TxTresh(TX_THRESH_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	}

	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	if (!is_quark_x1000_ssp(drv_data))
		pxa2xx_spi_write(drv_data, SSPSP, 0);

	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = devm_spi_register_master(&pdev->dev, master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	pxa2xx_spi_write(drv_data, SSCR0, 0);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status = 0;

	pxa2xx_spi_dma_resume(drv_data);

	/* Enable the SSP clock */
	if (!pm_runtime_suspended(dev))
		clk_prepare_enable(ssp->clk);

	/* Restore LPSS private register bits */
	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.pm	= &pxa2xx_spi_pm_ops,
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);