// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_device.h>

#include "spi-pxa2xx.h"

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000

/*
 * For testing SSCR1 changes that require SSP restart, basically
 * everything except the service and interrupt enables. The PXA270 Developer
 * Manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this
 * list, but the PXA255 Developer Manual says all bits without really meaning
 * the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF	\
				| QUARK_X1000_SSCR1_EFWR	\
				| QUARK_X1000_SSCR1_RFT		\
				| QUARK_X1000_SSCR1_TFT		\
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define CE4100_SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| CE4100_SSCR1_RFT | CE4100_SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE	BIT(24)
#define LPSS_CS_CONTROL_SW_MODE			BIT(0)
#define LPSS_CS_CONTROL_CS_HIGH			BIT(1)
#define LPSS_CAPS_CS_EN_SHIFT			9
#define LPSS_CAPS_CS_EN_MASK			(0xf << LPSS_CAPS_CS_EN_SHIFT)

struct lpss_config {
	/* LPSS offset from drv_data->ioaddr */
	unsigned offset;
	/* Register offsets from drv_data->lpss_base or -1 */
	int reg_general;
	int reg_ssp;
	int reg_cs_ctrl;
	int reg_capabilities;
	/* FIFO thresholds */
	u32 rx_threshold;
	u32 tx_threshold_lo;
	u32 tx_threshold_hi;
	/* Chip select control */
	unsigned cs_sel_shift;
	unsigned cs_sel_mask;
	unsigned cs_num;
};

/* Keep these sorted with enum pxa_ssp_type */
static const struct lpss_config lpss_platforms[] = {
	{	/* LPSS_LPT_SSP */
		.offset = 0x800,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BYT_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BSW_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
		.cs_sel_shift = 2,
		.cs_sel_mask = 1 << 2,
		.cs_num = 2,
	},
	{	/* LPSS_SPT_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.reg_capabilities = -1,
		.rx_threshold = 1,
		.tx_threshold_lo = 32,
		.tx_threshold_hi = 56,
	},
	{	/* LPSS_BXT_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.reg_capabilities = 0xfc,
		.rx_threshold = 1,
		.tx_threshold_lo = 16,
		.tx_threshold_hi = 48,
		.cs_sel_shift = 8,
		.cs_sel_mask = 3 << 8,
	},
	{	/* LPSS_CNL_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.reg_capabilities = 0xfc,
		.rx_threshold = 1,
		.tx_threshold_lo = 32,
		.tx_threshold_hi = 56,
		.cs_sel_shift = 8,
		.cs_sel_mask = 3 << 8,
	},
};

static inline const struct lpss_config
*lpss_get_config(const struct driver_data *drv_data)
{
	return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
}

static bool is_lpss_ssp(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_BSW_SSP:
	case LPSS_SPT_SSP:
	case LPSS_BXT_SSP:
	case LPSS_CNL_SSP:
		return true;
	default:
		return false;
	}
}

static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == QUARK_X1000_SSP;
}

static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return QUARK_X1000_SSCR1_CHANGE_MASK;
	case CE4100_SSP:
		return CE4100_SSCR1_CHANGE_MASK;
	default:
		return SSCR1_CHANGE_MASK;
	}
}

static u32
pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return RX_THRESH_QUARK_X1000_DFLT;
	case CE4100_SSP:
		return RX_THRESH_CE4100_DFLT;
	default:
		return RX_THRESH_DFLT;
	}
}

static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSSR_TFL_MASK;
		break;
	case CE4100_SSP:
		mask = CE4100_SSSR_TFL_MASK;
		break;
	default:
		mask = SSSR_TFL_MASK;
		break;
	}

	return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
}

static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
				     u32 *sccr1_reg)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSCR1_RFT;
		break;
	case CE4100_SSP:
		mask = CE4100_SSCR1_RFT;
		break;
	default:
		mask = SSCR1_RFT;
		break;
	}
	*sccr1_reg &= ~mask;
}

static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
				   u32 *sccr1_reg, u32 threshold)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
		break;
	case CE4100_SSP:
		*sccr1_reg |= CE4100_SSCR1_RxTresh(threshold);
		break;
	default:
		*sccr1_reg |= SSCR1_RxTresh(threshold);
		break;
	}
}

static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
				  u32 clk_div, u8 bits)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return clk_div
			| QUARK_X1000_SSCR0_Motorola
			| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
			| SSCR0_SSE;
	default:
		return clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}
}
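
/*
 * Illustrative note (not from the original source): in the non-Quark case
 * above, a 24-bit transfer is encoded as SSCR0_DataSize(24 - 16) with
 * SSCR0_EDSS set, while an 8-bit transfer is simply SSCR0_DataSize(8)
 * with EDSS clear.
 */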

/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}

static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}

/*
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);
	drv_data->lpss_base = drv_data->ioaddr + config->offset;

	/* Enable software chip select control */
	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
	value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->controller_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);

		if (config->reg_general >= 0) {
			value = __lpss_ssp_read_priv(drv_data,
						     config->reg_general);
			value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
			__lpss_ssp_write_priv(drv_data,
					      config->reg_general, value);
		}
	}
}

static void lpss_ssp_select_cs(struct spi_device *spi,
			       const struct lpss_config *config)
{
	struct driver_data *drv_data =
		spi_controller_get_devdata(spi->controller);
	u32 value, cs;

	if (!config->cs_sel_mask)
		return;

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);

	cs = spi->chip_select;
	cs <<= config->cs_sel_shift;
	if (cs != (value & config->cs_sel_mask)) {
		/*
		 * When switching another chip select output active the
		 * output must be selected first and wait 2 ssp_clk cycles
		 * before changing state to active. Otherwise a short
		 * glitch will occur on the previous chip select since
		 * output select is latched but state control is not.
		 */
		value &= ~config->cs_sel_mask;
		value |= cs;
		__lpss_ssp_write_priv(drv_data,
				      config->reg_cs_ctrl, value);
		ndelay(1000000000 /
		       (drv_data->controller->max_speed_hz / 2));
	}
}
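
/*
 * Note (illustrative, not from the original source): the ndelay() above
 * works out to 1e9 / (max_speed_hz / 2) ns = 2 / max_speed_hz seconds,
 * i.e. the two ssp_clk cycles called for in the comment; with, say, a
 * 50 MHz SSP clock that is a 40 ns delay.
 */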

static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
{
	struct driver_data *drv_data =
		spi_controller_get_devdata(spi->controller);
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);

	if (enable)
		lpss_ssp_select_cs(spi, config);

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	if (enable)
		value &= ~LPSS_CS_CONTROL_CS_HIGH;
	else
		value |= LPSS_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}

static void cs_assert(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data =
		spi_controller_get_devdata(spi->controller);

	if (drv_data->ssp_type == CE4100_SSP) {
		pxa2xx_spi_write(drv_data, SSSR, chip->frm);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (chip->gpiod_cs) {
		gpiod_set_value(chip->gpiod_cs, chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(spi, true);
}

static void cs_deassert(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data =
		spi_controller_get_devdata(spi->controller);
	unsigned long timeout;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	/* Wait until SSP becomes idle before deasserting the CS */
	timeout = jiffies + msecs_to_jiffies(10);
	while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
	       !time_after(jiffies, timeout))
		cpu_relax();

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (chip->gpiod_cs) {
		gpiod_set_value(chip->gpiod_cs, !chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(spi, false);
}

static void pxa2xx_spi_set_cs(struct spi_device *spi, bool level)
{
	if (level)
		cs_deassert(spi);
	else
		cs_assert(spi);
}

int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	do {
		while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
			pxa2xx_spi_read(drv_data, SSDR);
	} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

static int null_writer(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, 0);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

static void reset_sccr1(struct driver_data *drv_data)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->controller->cur_msg->spi);
	u32 sccr1_reg;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
		break;
	case CE4100_SSP:
		sccr1_reg &= ~CE4100_SSCR1_RFT;
		break;
	default:
		sccr1_reg &= ~SSCR1_RFT;
		break;
	}
	sccr1_reg |= chip->threshold;
	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}

static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->controller->cur_msg->status = -EIO;
	spi_finalize_current_transfer(drv_data->controller);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	/* Clear and disable interrupts */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	spi_finalize_current_transfer(drv_data->controller);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
		       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TUR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo underrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {
			u32 rx_thre;

			pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 2;
				break;
			case 2:
				bytes_left >>= 1;
				break;
			}

			rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
			if (rx_thre > bytes_left)
				rx_thre = bytes_left;

			pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
		}
		pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

static void handle_bad_msg(struct driver_data *drv_data)
{
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
	pxa2xx_spi_write(drv_data, SSCR1,
			 pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	write_SSSR_CS(drv_data, drv_data->clear_sr);

	dev_err(&drv_data->pdev->dev,
		"bad message state in interrupt handler\n");
}

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals so we must first
	 * check whether we are RPM suspended or not. If we are, we assume
	 * that the IRQ was not for us (we shouldn't be RPM suspended when
	 * the interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = pxa2xx_spi_read(drv_data, SSSR);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	/* Ignore RX timeout interrupt if it is disabled */
	if (!(sccr1_reg & SSCR1_TINTE))
		mask &= ~SSSR_TINT;

	if (!(status & mask))
		return IRQ_NONE;

	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);

	if (!drv_data->controller->cur_msg) {
		handle_bad_msg(drv_data);
		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

/*
 * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
 * input frequency by fractions of 2^24. It also has a divider by 5.
 *
 * There are formulas to get baud rate value for given input frequency and
 * divider parameters, such as DDS_CLK_RATE and SCR:
 *
 * Fsys = 200MHz
 *
 * Fssp = Fsys * DDS_CLK_RATE / 2^24			(1)
 * Baud rate = Fsclk = Fssp / (2 * (SCR + 1))		(2)
 *
 * DDS_CLK_RATE either 2^n or 2^n / 5.
 * SCR is in range 0 .. 255
 *
 * Divisor = 5^i * 2^j * 2 * k
 *       i = [0, 1]      i = 1 iff j = 0 or j > 3
 *       j = [0, 23]     j = 0 iff i = 1
 *       k = [1, 256]
 * Special case: j = 0, i = 1: Divisor = 2 / 5
 *
 * According to the specification the recommended values for DDS_CLK_RATE
 * are:
 *	Case 1:		2^n, n = [0, 23]
 *	Case 2:		2^24 * 2 / 5 (0x666666)
 *	Case 3:		less than or equal to 2^24 / 5 / 16 (0x33333)
 *
 * In all cases the lowest possible value is better.
 *
 * The function calculates parameters for all cases and chooses the one closest
 * to the requested baud rate.
 */
static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
	unsigned long xtal = 200000000;
	unsigned long fref = xtal / 2;		/* mandatory division by 2,
						   see (2) */
						/* case 3 */
	unsigned long fref1 = fref / 2;		/* case 1 */
	unsigned long fref2 = fref * 2 / 5;	/* case 2 */
	unsigned long scale;
	unsigned long q, q1, q2;
	long r, r1, r2;
	u32 mul;

	/* Case 1 */

	/* Set initial value for DDS_CLK_RATE */
	mul = (1 << 24) >> 1;

	/* Calculate initial quot */
	q1 = DIV_ROUND_UP(fref1, rate);

	/* Scale q1 if it's too big */
	if (q1 > 256) {
		/* Scale q1 to range [1, 512] */
		scale = fls_long(q1 - 1);
		if (scale > 9) {
			q1 >>= scale - 9;
			mul >>= scale - 9;
		}

		/* Round the result if we have a remainder */
		q1 += q1 & 1;
	}

	/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
	scale = __ffs(q1);
	q1 >>= scale;
	mul >>= scale;

	/* Get the remainder */
	r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);

	/* Case 2 */

	q2 = DIV_ROUND_UP(fref2, rate);
	r2 = abs(fref2 / q2 - rate);

	/*
	 * Choose the best of the two: the less remainder we have, the better.
	 * We can't go case 2 if q2 is greater than 256 since SCR register can
	 * hold only values 0 .. 255.
	 */
	if (r2 >= r1 || q2 > 256) {
		/* case 1 is better */
		r = r1;
		q = q1;
	} else {
		/* case 2 is better */
		r = r2;
		q = q2;
		mul = (1 << 24) * 2 / 5;
	}

	/* Check case 3 only if the divisor is big enough */
	if (fref / rate >= 80) {
		u64 fssp;
		u32 m;

		/* Calculate initial quot */
		q1 = DIV_ROUND_UP(fref, rate);
		m = (1 << 24) / q1;

		/* Get the remainder */
		fssp = (u64)fref * m;
		do_div(fssp, 1 << 24);
		r1 = abs(fssp - rate);

		/* Choose this one if it suits better */
		if (r1 < r) {
			/* case 3 is better */
			q = 1;
			mul = m;
		}
	}

	*dds = mul;
	return q - 1;
}
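
/*
 * Worked example (illustrative, not from the original source): with
 * DDS_CLK_RATE = 0x400000 (2^22), formula (1) gives
 * Fssp = 200 MHz * 2^22 / 2^24 = 50 MHz, and SCR = 24 in formula (2)
 * gives 50 MHz / (2 * (24 + 1)) = 1 MHz, i.e. an exact 1 MHz bit clock.
 */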

static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->controller->max_speed_hz;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	/*
	 * Calculate the divisor for the SCR (Serial Clock Rate) so that the
	 * SSP transmission rate is never greater than the device rate.
	 */
	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
	else
		return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
}
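
/*
 * Illustrative example (assumes a hypothetical 100 MHz SSP clock, not a
 * value taken from this driver): for a requested rate of 3 MHz the
 * non-PXA25x branch returns DIV_ROUND_UP(100 MHz, 3 MHz) - 1 = 33, and
 * the resulting bit clock is 100 MHz / (33 + 1) ~= 2.94 MHz; rounding the
 * divisor up keeps the bus at or below the requested rate.
 */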

static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
					   int rate)
{
	struct chip_data *chip =
		spi_get_ctldata(drv_data->controller->cur_msg->spi);
	unsigned int clk_div;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
		break;
	default:
		clk_div = ssp_get_clk_div(drv_data, rate);
		break;
	}
	return clk_div << 8;
}

static bool pxa2xx_spi_can_dma(struct spi_controller *controller,
			       struct spi_device *spi,
			       struct spi_transfer *xfer)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	return chip->enable_dma &&
	       xfer->len <= MAX_DMA_LEN &&
	       xfer->len >= chip->dma_burst_size;
}

static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
				   struct spi_device *spi,
				   struct spi_transfer *transfer)
{
	struct driver_data *drv_data = spi_controller_get_devdata(controller);
	struct spi_message *message = controller->cur_msg;
	struct chip_data *chip = spi_get_ctldata(spi);
	u32 dma_thresh = chip->dma_threshold;
	u32 dma_burst = chip->dma_burst_size;
	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
	u32 clk_div;
	u8 bits;
	u32 speed;
	u32 cr0;
	u32 cr1;
	int err;
	int dma_mapped;

	/* Check if we can DMA this transfer */
	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&spi->dev,
				"Mapped transfer length of %u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			return -EINVAL;
		}

		/* warn ... we force this to PIO mode */
		dev_warn_ratelimited(&spi->dev,
				     "DMA disabled for transfer length %ld greater than %d\n",
				     (long)transfer->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&spi->dev, "Flush failed\n");
		return -EIO;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change the speed and bits per word on every transfer */
	bits = transfer->bits_per_word;
	speed = transfer->speed_hz;

	clk_div = pxa2xx_ssp_get_clk_div(drv_data, speed);

	if (bits <= 8) {
		drv_data->n_bytes = 1;
		drv_data->read = drv_data->read != null_reader ?
					u8_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u8_writer : null_writer;
	} else if (bits <= 16) {
		drv_data->n_bytes = 2;
		drv_data->read = drv_data->read != null_reader ?
					u16_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u16_writer : null_writer;
	} else if (bits <= 32) {
		drv_data->n_bytes = 4;
		drv_data->read = drv_data->read != null_reader ?
					u32_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u32_writer : null_writer;
	}
	/*
	 * If bits per word is changed in DMA mode, then the DMA burst and
	 * thresholds must be checked as well.
	 */
	if (chip->enable_dma) {
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
						spi,
						bits, &dma_burst,
						&dma_thresh))
			dev_warn_ratelimited(&spi->dev,
					     "DMA burst size reduced to match bits_per_word\n");
	}

	dma_mapped = controller->can_dma &&
		     controller->can_dma(controller, spi, transfer) &&
		     controller->cur_msg_mapped;
	if (dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		err = pxa2xx_spi_dma_prepare(drv_data, transfer);
		if (err)
			return err;

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	/* NOTE: PXA25x_SSP _could_ use external clocking ... */
	cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
	if (!pxa25x_ssp_comp(drv_data))
		dev_dbg(&spi->dev, "%u Hz actual, %s\n",
			controller->max_speed_hz
				/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
			dma_mapped ? "DMA" : "PIO");
	else
		dev_dbg(&spi->dev, "%u Hz actual, %s\n",
			controller->max_speed_hz / 2
				/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			dma_mapped ? "DMA" : "PIO");

	if (is_lpss_ssp(drv_data)) {
		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
		    != chip->lpss_rx_threshold)
			pxa2xx_spi_write(drv_data, SSIRF,
					 chip->lpss_rx_threshold);
		if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
		    != chip->lpss_tx_threshold)
			pxa2xx_spi_write(drv_data, SSITF,
					 chip->lpss_tx_threshold);
	}

	if (is_quark_x1000_ssp(drv_data) &&
	    (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
		pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);

	/* see if we need to reload the config registers */
	if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
	    || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
	    != (cr1 & change_mask)) {
		/* stop the SSP, and update the other bits */
		pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
		/* first set CR1 without interrupt and service enables */
		pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
		/* restart the SSP */
		pxa2xx_spi_write(drv_data, SSCR0, cr0);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
	}

	if (drv_data->ssp_type == MMP2_SSP) {
		u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR)
					& SSSR_TFL_MASK) >> 8;

		if (tx_level) {
			/* On MMP2, flipping SSE doesn't empty the TXFIFO. */
			dev_warn(&spi->dev, "%d bytes of garbage in TXFIFO!\n",
								tx_level);
			if (tx_level > transfer->len)
				tx_level = transfer->len;
			drv_data->tx += tx_level;
		}
	}

	if (spi_controller_is_slave(controller)) {
		while (drv_data->write(drv_data))
			;
		if (drv_data->gpiod_ready) {
			gpiod_set_value(drv_data->gpiod_ready, 1);
			udelay(1);
			gpiod_set_value(drv_data->gpiod_ready, 0);
		}
	}

	/*
	 * Release the data by enabling service requests and interrupts,
	 * without changing any mode bits
	 */
	pxa2xx_spi_write(drv_data, SSCR1, cr1);

	return 1;
}

static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
{
	struct driver_data *drv_data = spi_controller_get_devdata(controller);

	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");

	drv_data->controller->cur_msg->status = -EINTR;
	spi_finalize_current_transfer(drv_data->controller);

	return 0;
}

static void pxa2xx_spi_handle_err(struct spi_controller *controller,
				 struct spi_message *msg)
{
	struct driver_data *drv_data = spi_controller_get_devdata(controller);

	/* Disable the SSP */
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
	/* Clear and disable interrupts and service requests */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	pxa2xx_spi_write(drv_data, SSCR1,
			 pxa2xx_spi_read(drv_data, SSCR1)
			 & ~(drv_data->int_cr1 | drv_data->dma_cr1));
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	/*
	 * Stop the DMA if running. Note the DMA callback handler may have
	 * unset dma_running already, which is fine as stopping is not needed
	 * then, but we shouldn't rely on this flag for anything other than
	 * stopping, for instance to differentiate between PIO and DMA
	 * transfers.
	 */
	if (atomic_read(&drv_data->dma_running))
		pxa2xx_spi_dma_stop(drv_data);
}

static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
{
	struct driver_data *drv_data = spi_controller_get_devdata(controller);

	/* Disable the SSP now */
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	return 0;
}

static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	struct driver_data *drv_data =
		spi_controller_get_devdata(spi->controller);
	struct gpio_desc *gpiod;
	int err = 0;

	if (chip == NULL)
		return 0;

	if (drv_data->cs_gpiods) {
		gpiod = drv_data->cs_gpiods[spi->chip_select];
		if (gpiod) {
			chip->gpiod_cs = gpiod;
			chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
			gpiod_set_value(gpiod, chip->gpio_cs_inverted);
		}

		return 0;
	}

	if (chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (chip->gpiod_cs) {
		gpiod_put(chip->gpiod_cs);
		chip->gpiod_cs = NULL;
	}

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		gpiod = gpio_to_desc(chip_info->gpio_cs);
		chip->gpiod_cs = gpiod;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpiod_direction_output(gpiod, !chip->gpio_cs_inverted);
	}

	return err;
}

static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info;
	struct chip_data *chip;
	const struct lpss_config *config;
	struct driver_data *drv_data =
		spi_controller_get_devdata(spi->controller);
	uint tx_thres, tx_hi_thres, rx_thres;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tx_thres = TX_THRESH_QUARK_X1000_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_QUARK_X1000_DFLT;
		break;
	case CE4100_SSP:
		tx_thres = TX_THRESH_CE4100_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_CE4100_DFLT;
		break;
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_BSW_SSP:
	case LPSS_SPT_SSP:
	case LPSS_BXT_SSP:
	case LPSS_CNL_SSP:
		config = lpss_get_config(drv_data);
		tx_thres = config->tx_threshold_lo;
		tx_hi_thres = config->tx_threshold_hi;
		rx_thres = config->rx_threshold;
		break;
	default:
		tx_hi_thres = 0;
		if (spi_controller_is_slave(drv_data->controller)) {
			tx_thres = 1;
			rx_thres = 2;
		} else {
			tx_thres = TX_THRESH_DFLT;
			rx_thres = RX_THRESH_DFLT;
		}
		break;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		}
		chip->enable_dma = drv_data->controller_info->enable_dma;
		chip->timeout = TIMOUT_DFLT;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->tx_hi_threshold)
			tx_hi_thres = chip_info->tx_hi_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	}
	if (spi_controller_is_slave(drv_data->controller)) {
		chip->cr1 |= SSCR1_SCFR;
		chip->cr1 |= SSCR1_SCLKDIR;
		chip->cr1 |= SSCR1_SFRMDIR;
		chip->cr1 |= SSCR1_SPH;
	}

	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
				| SSITF_TxHiThresh(tx_hi_thres);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev,
				 "in setup: DMA burst size reduced to match bits_per_word\n");
		}
		dev_dbg(&spi->dev,
			"in setup: DMA burst size set to %u\n",
			chip->dma_burst_size);
	}

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
				   & QUARK_X1000_SSCR1_RFT)
				   | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
				   & QUARK_X1000_SSCR1_TFT);
		break;
	case CE4100_SSP:
		chip->threshold = (CE4100_SSCR1_RxTresh(rx_thres) & CE4100_SSCR1_RFT) |
			(CE4100_SSCR1_TxTresh(tx_thres) & CE4100_SSCR1_TFT);
		break;
	default:
		chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
		break;
	}

	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	}

	spi_set_ctldata(spi, chip);

	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

	return setup_cs(spi, chip, chip_info);
}

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data =
		spi_controller_get_devdata(spi->controller);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && !drv_data->cs_gpiods &&
	    chip->gpiod_cs)
		gpiod_put(chip->gpiod_cs);

	kfree(chip);
}

static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ "INT33C0", LPSS_LPT_SSP },
	{ "INT33C1", LPSS_LPT_SSP },
	{ "INT3430", LPSS_LPT_SSP },
	{ "INT3431", LPSS_LPT_SSP },
	{ "80860F0E", LPSS_BYT_SSP },
	{ "8086228E", LPSS_BSW_SSP },
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

/*
 * PCI IDs of compound devices that integrate both host controller and private
 * integrated DMA engine. Please note these are not used in module
 * autoloading and probing in this module but only for matching the LPSS SSP
 * type.
 */
static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
	/* SPT-LP */
	{ PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
	/* SPT-H */
	{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
	/* KBL-H */
	{ PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
	/* BXT A-Step */
	{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
	/* BXT B-Step */
	{ PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
	/* GLK */
	{ PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
	/* ICL-LP */
	{ PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
	/* EHL */
	{ PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP },
	/* APL */
	{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
	/* CNL-LP */
	{ PCI_VDEVICE(INTEL, 0x9daa), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0x9dab), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0x9dfb), LPSS_CNL_SSP },
	/* CNL-H */
	{ PCI_VDEVICE(INTEL, 0xa32a), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0xa32b), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0xa37b), LPSS_CNL_SSP },
	/* CML-LP */
	{ PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
	{ PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
	{ },
};

static const struct of_device_id pxa2xx_spi_of_match[] = {
	{ .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
	{},
};
MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);

#ifdef CONFIG_ACPI

static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
	unsigned int devid;
	int port_id = -1;

	if (adev && adev->pnp.unique_id &&
	    !kstrtouint(adev->pnp.unique_id, 0, &devid))
		port_id = devid;
	return port_id;
}

#else /* !CONFIG_ACPI */

static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
	return -1;
}

#endif /* CONFIG_ACPI */


#ifdef CONFIG_PCI

static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
	return param == chan->device->dev;
}

#endif /* CONFIG_PCI */

static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_controller *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
	const struct acpi_device_id *adev_id = NULL;
	const struct pci_device_id *pcidev_id = NULL;
	const struct of_device_id *of_id = NULL;
	enum pxa_ssp_type type;

	adev = ACPI_COMPANION(&pdev->dev);

	if (pdev->dev.of_node)
		of_id = of_match_device(pdev->dev.driver->of_match_table,
					&pdev->dev);
	else if (dev_is_pci(pdev->dev.parent))
		pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
					 to_pci_dev(pdev->dev.parent));
	else if (adev)
		adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
	else
		return NULL;

	if (adev_id)
		type = (enum pxa_ssp_type)adev_id->driver_data;
	else if (pcidev_id)
		type = (enum pxa_ssp_type)pcidev_id->driver_data;
	else if (of_id)
		type = (enum pxa_ssp_type)of_id->data;
	else
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return NULL;

#ifdef CONFIG_PCI
	if (pcidev_id) {
		pdata->tx_param = pdev->dev.parent;
		pdata->rx_param = pdev->dev.parent;
		pdata->dma_filter = pxa2xx_spi_idma_filter;
	}
#endif

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
	ssp->type = type;
	ssp->pdev = pdev;
	ssp->port_id = pxa2xx_spi_get_port_id(adev);

	pdata->is_slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
	pdata->num_chipselect = 1;
	pdata->enable_dma = true;
	pdata->dma_burst_size = 1;

	return pdata;
}

static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
				      unsigned int cs)
{
	struct driver_data *drv_data = spi_controller_get_devdata(controller);

	if (has_acpi_companion(&drv_data->pdev->dev)) {
		switch (drv_data->ssp_type) {
		/*
		 * For Atoms the ACPI DeviceSelection used by the Windows
		 * driver starts from 1 instead of 0 so translate it here
		 * to match what Linux expects.
		 */
		case LPSS_BYT_SSP:
		case LPSS_BSW_SSP:
			return cs - 1;

		default:
			break;
		}
	}

	return cs;
}

static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_controller *platform_info;
	struct spi_controller *controller;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	const struct lpss_config *config;
	int status, count;
	u32 tmp;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		platform_info = pxa2xx_spi_init_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	if (platform_info->is_slave)
		controller = spi_alloc_slave(dev, sizeof(struct driver_data));
	else
		controller = spi_alloc_master(dev, sizeof(struct driver_data));

	if (!controller) {
		dev_err(&pdev->dev, "cannot alloc spi_controller\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_controller_get_devdata(controller);
	drv_data->controller = controller;
	drv_data->controller_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	controller->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

	controller->bus_num = ssp->port_id;
	controller->dma_alignment = DMA_ALIGNMENT;
	controller->cleanup = cleanup;
	controller->setup = setup;
	controller->set_cs = pxa2xx_spi_set_cs;
	controller->transfer_one = pxa2xx_spi_transfer_one;
	controller->slave_abort = pxa2xx_spi_slave_abort;
	controller->handle_err = pxa2xx_spi_handle_err;
	controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
	controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
	controller->auto_runtime_pm = true;
	controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;

	drv_data->ssp_type = ssp->type;

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		switch (drv_data->ssp_type) {
		case QUARK_X1000_SSP:
			controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
			break;
		default:
			controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
			break;
		}

		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS
						| SSSR_ROR | SSSR_TUR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_controller_alloc;
	}

	/* Setup DMA if requested */
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_warn(dev, "no DMA channels available, using PIO\n");
			platform_info->enable_dma = false;
		} else {
			controller->can_dma = pxa2xx_spi_can_dma;
			controller->max_dma_len = MAX_DMA_LEN;
		}
	}

	/* Enable SOC clock */
	status = clk_prepare_enable(ssp->clk);
	if (status)
		goto out_error_dma_irq_alloc;

	controller->max_speed_hz = clk_get_rate(ssp->clk);
	/*
	 * Set minimum speed for all other platforms than Intel Quark which is
	 * able to do under 1 Hz transfers.
	 */
	if (!pxa25x_ssp_comp(drv_data))
		controller->min_speed_hz =
			DIV_ROUND_UP(controller->max_speed_hz, 4096);
	else if (!is_quark_x1000_ssp(drv_data))
		controller->min_speed_hz =
			DIV_ROUND_UP(controller->max_speed_hz, 512);

	/* Load default SSP configuration */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT) |
		      QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);

		/* Use the Motorola SPI protocol and 8-bit frames */
		tmp = QUARK_X1000_SSCR0_Motorola | QUARK_X1000_SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	case CE4100_SSP:
		tmp = CE4100_SSCR1_RxTresh(RX_THRESH_CE4100_DFLT) |
		      CE4100_SSCR1_TxTresh(TX_THRESH_CE4100_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	default:

		if (spi_controller_is_slave(controller)) {
			tmp = SSCR1_SCFR |
			      SSCR1_SCLKDIR |
			      SSCR1_SFRMDIR |
			      SSCR1_RxTresh(2) |
			      SSCR1_TxTresh(1) |
			      SSCR1_SPH;
		} else {
			tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
			      SSCR1_TxTresh(TX_THRESH_DFLT);
		}
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_Motorola | SSCR0_DataSize(8);
		if (!spi_controller_is_slave(controller))
			tmp |= SSCR0_SCR(2);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	}

	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	if (!is_quark_x1000_ssp(drv_data))
		pxa2xx_spi_write(drv_data, SSPSP, 0);

	if (is_lpss_ssp(drv_data)) {
		lpss_ssp_setup(drv_data);
		config = lpss_get_config(drv_data);
		if (config->reg_capabilities >= 0) {
			tmp = __lpss_ssp_read_priv(drv_data,
						   config->reg_capabilities);
			tmp &= LPSS_CAPS_CS_EN_MASK;
			tmp >>= LPSS_CAPS_CS_EN_SHIFT;
			platform_info->num_chipselect = ffz(tmp);
		} else if (config->cs_num) {
			platform_info->num_chipselect = config->cs_num;
		}
	}
	controller->num_chipselect = platform_info->num_chipselect;

	count = gpiod_count(&pdev->dev, "cs");
	if (count > 0) {
		int i;

		controller->num_chipselect = max_t(int, count,
			controller->num_chipselect);

		drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
			controller->num_chipselect, sizeof(struct gpio_desc *),
			GFP_KERNEL);
		if (!drv_data->cs_gpiods) {
			status = -ENOMEM;
			goto out_error_clock_enabled;
		}

		for (i = 0; i < controller->num_chipselect; i++) {
			struct gpio_desc *gpiod;

			gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
			if (IS_ERR(gpiod)) {
				/* Means use native chip select */
				if (PTR_ERR(gpiod) == -ENOENT)
					continue;

				status = PTR_ERR(gpiod);
				goto out_error_clock_enabled;
			} else {
				drv_data->cs_gpiods[i] = gpiod;
			}
		}
	}

	if (platform_info->is_slave) {
		drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
						"ready", GPIOD_OUT_LOW);
		if (IS_ERR(drv_data->gpiod_ready)) {
			status = PTR_ERR(drv_data->gpiod_ready);
			goto out_error_clock_enabled;
		}
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = devm_spi_register_controller(&pdev->dev, controller);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi controller\n");
		goto out_error_clock_enabled;
	}

	return status;

out_error_clock_enabled:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(ssp->clk);

out_error_dma_irq_alloc:
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_controller_alloc:
	spi_controller_put(controller);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->controller_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status;

	status = spi_controller_suspend(drv_data->controller);
	if (status != 0)
		return status;
	pxa2xx_spi_write(drv_data, SSCR0, 0);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status;

	/* Enable the SSP clock */
	if (!pm_runtime_suspended(dev)) {
		status = clk_prepare_enable(ssp->clk);
		if (status)
			return status;
	}

	/* Start the queue running */
	return spi_controller_resume(drv_data->controller);
}
#endif

#ifdef CONFIG_PM
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	int status;

	status = clk_prepare_enable(drv_data->ssp->clk);
	return status;
}
#endif

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.pm	= &pxa2xx_spi_pm_ops,
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
		.of_match_table = of_match_ptr(pxa2xx_spi_of_match),
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);

MODULE_SOFTDEP("pre: dw_dmac");