/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 * Copyright (C) 2013, Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>

#include "spi-pxa2xx.h"

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define TIMOUT_DFLT		1000

/*
 * For testing SSCR1 changes that require an SSP restart; basically
 * everything except the service and interrupt enables. The PXA270 developer
 * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this
 * list, but the PXA255 developer manual lists all bits, without really
 * meaning the service and interrupt enables.
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF	\
				| QUARK_X1000_SSCR1_EFWR	\
				| QUARK_X1000_SSCR1_RFT		\
				| QUARK_X1000_SSCR1_TFT		\
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE	BIT(24)
#define LPSS_CS_CONTROL_SW_MODE			BIT(0)
#define LPSS_CS_CONTROL_CS_HIGH			BIT(1)
#define LPSS_CAPS_CS_EN_SHIFT			9
#define LPSS_CAPS_CS_EN_MASK			(0xf << LPSS_CAPS_CS_EN_SHIFT)

struct lpss_config {
	/* LPSS offset from drv_data->ioaddr */
	unsigned offset;
	/* Register offsets from drv_data->lpss_base or -1 */
	int reg_general;
	int reg_ssp;
	int reg_cs_ctrl;
	int reg_capabilities;
	/* FIFO thresholds */
	u32 rx_threshold;
	u32 tx_threshold_lo;
	u32 tx_threshold_hi;
	/* Chip select control */
	unsigned cs_sel_shift;
	unsigned cs_sel_mask;
	unsigned cs_num;
};

/* Keep these sorted with enum pxa_ssp_type */
static const struct lpss_config lpss_platforms[] = {
	{	/* LPSS_LPT_SSP */
		.offset = 0x800,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BYT_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
	},
	{	/* LPSS_BSW_SSP */
		.offset = 0x400,
		.reg_general = 0x08,
		.reg_ssp = 0x0c,
		.reg_cs_ctrl = 0x18,
		.reg_capabilities = -1,
		.rx_threshold = 64,
		.tx_threshold_lo = 160,
		.tx_threshold_hi = 224,
		.cs_sel_shift = 2,
		.cs_sel_mask = 1 << 2,
		.cs_num = 2,
	},
	{	/* LPSS_SPT_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.reg_capabilities = -1,
		.rx_threshold = 1,
		.tx_threshold_lo = 32,
		.tx_threshold_hi = 56,
	},
	{	/* LPSS_BXT_SSP */
		.offset = 0x200,
		.reg_general = -1,
		.reg_ssp = 0x20,
		.reg_cs_ctrl = 0x24,
		.reg_capabilities = 0xfc,
		.rx_threshold = 1,
		.tx_threshold_lo = 16,
		.tx_threshold_hi = 48,
		.cs_sel_shift = 8,
		.cs_sel_mask = 3 << 8,
	},
};

static inline const struct lpss_config
*lpss_get_config(const struct driver_data *drv_data)
{
	return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
}

static bool is_lpss_ssp(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_BSW_SSP:
	case LPSS_SPT_SSP:
	case LPSS_BXT_SSP:
		return true;
	default:
		return false;
	}
}

static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
{
	return drv_data->ssp_type == QUARK_X1000_SSP;
}

static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return QUARK_X1000_SSCR1_CHANGE_MASK;
	default:
		return SSCR1_CHANGE_MASK;
	}
}

static u32
pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return RX_THRESH_QUARK_X1000_DFLT;
	default:
		return RX_THRESH_DFLT;
	}
}

static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSSR_TFL_MASK;
		break;
	default:
		mask = SSSR_TFL_MASK;
		break;
	}

	return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
}

static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
				     u32 *sccr1_reg)
{
	u32 mask;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		mask = QUARK_X1000_SSCR1_RFT;
		break;
	default:
		mask = SSCR1_RFT;
		break;
	}
	*sccr1_reg &= ~mask;
}

static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
				   u32 *sccr1_reg, u32 threshold)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		*sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
		break;
	default:
		*sccr1_reg |= SSCR1_RxTresh(threshold);
		break;
	}
}

static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
				  u32 clk_div, u8 bits)
{
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		return clk_div
			| QUARK_X1000_SSCR0_Motorola
			| QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
			| SSCR0_SSE;
	default:
		return clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}
}

/*
 * Read and write LPSS SSP private registers. Caller must first check that
 * is_lpss_ssp() returns true before these can be called.
 */
static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
{
	WARN_ON(!drv_data->lpss_base);
	return readl(drv_data->lpss_base + offset);
}

static void __lpss_ssp_write_priv(struct driver_data *drv_data,
				  unsigned offset, u32 value)
{
	WARN_ON(!drv_data->lpss_base);
	writel(value, drv_data->lpss_base + offset);
}

/*
 * lpss_ssp_setup - perform LPSS SSP specific setup
 * @drv_data: pointer to the driver private data
 *
 * Perform LPSS SSP specific setup. This function must be called first if
 * one is going to use LPSS SSP private registers.
 */
static void lpss_ssp_setup(struct driver_data *drv_data)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);
	drv_data->lpss_base = drv_data->ioaddr + config->offset;

	/* Enable software chip select control */
	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	value &= ~(LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH);
	value |= LPSS_CS_CONTROL_SW_MODE | LPSS_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);

	/* Enable multiblock DMA transfers */
	if (drv_data->master_info->enable_dma) {
		__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);

		if (config->reg_general >= 0) {
			value = __lpss_ssp_read_priv(drv_data,
						     config->reg_general);
			value |= LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE;
			__lpss_ssp_write_priv(drv_data,
					      config->reg_general, value);
		}
	}
}

static void lpss_ssp_select_cs(struct driver_data *drv_data,
			       const struct lpss_config *config)
{
	u32 value, cs;

	if (!config->cs_sel_mask)
		return;

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);

	cs = drv_data->cur_msg->spi->chip_select;
	cs <<= config->cs_sel_shift;
	if (cs != (value & config->cs_sel_mask)) {
		/*
		 * When switching another chip select output active the
		 * output must be selected first and wait 2 ssp_clk cycles
		 * before changing state to active. Otherwise a short
		 * glitch will occur on the previous chip select since
		 * output select is latched but state control is not.
		 */
		value &= ~config->cs_sel_mask;
		value |= cs;
		__lpss_ssp_write_priv(drv_data,
				      config->reg_cs_ctrl, value);
		ndelay(1000000000 /
		       (drv_data->master->max_speed_hz / 2));
	}
}
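
/*
 * The ndelay() above waits 10^9 / (max_speed_hz / 2) ns, i.e. two periods
 * of the SSP input clock (master->max_speed_hz is set from
 * clk_get_rate(ssp->clk) in probe, integer rounding aside), which covers
 * the "2 ssp_clk cycles" requirement described in the comment.
 */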

static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
{
	const struct lpss_config *config;
	u32 value;

	config = lpss_get_config(drv_data);

	if (enable)
		lpss_ssp_select_cs(drv_data, config);

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
	if (enable)
		value &= ~LPSS_CS_CONTROL_CS_HIGH;
	else
		value |= LPSS_CS_CONTROL_CS_HIGH;
	__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
}

static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, true);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs)) {
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
		return;
	}

	if (is_lpss_ssp(drv_data))
		lpss_ssp_cs_control(drv_data, false);
}

int pxa2xx_spi_flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	do {
		while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
			pxa2xx_spi_read(drv_data, SSDR);
	} while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

static int null_writer(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, 0);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	u8 n_bytes = drv_data->n_bytes;

	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	if (pxa2xx_spi_txfifo_full(drv_data)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
	       && (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer* last_transfer;
	struct spi_message *msg;
	unsigned long timeout;

	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;

	last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Wait until SSP becomes idle before deasserting the CS */
	timeout = jiffies + msecs_to_jiffies(10);
	while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
	       !time_after(jiffies, timeout))
		cpu_relax();

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(drv_data->master);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if ((next_msg && next_msg->spi != msg->spi) ||
		    msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	drv_data->cur_chip = NULL;
	spi_finalize_current_message(drv_data->master);
}

static void reset_sccr1(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
		break;
	default:
		sccr1_reg &= ~SSCR1_RFT;
		break;
	}
	sccr1_reg |= chip->threshold;
	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
}

static void int_error_stop(struct driver_data *drv_data, const char* msg)
{
	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);
	pxa2xx_spi_flush(drv_data);
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	/* Clear and disable interrupts */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	/* Update total bytes transferred; return count of actual bytes read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
		       drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, Fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {
			u32 rx_thre;

			pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
				/* fall through */
			case 2:
				bytes_left >>= 1;
			}

			rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
			if (rx_thre > bytes_left)
				rx_thre = bytes_left;

			pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
		}
		pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	u32 sccr1_reg;
	u32 mask = drv_data->mask_sr;
	u32 status;

	/*
	 * The IRQ might be shared with other peripherals so we must first
	 * check that are we RPM suspended or not. If we are we assume that
	 * the IRQ was not for us (we shouldn't be RPM suspended when the
	 * interrupt is enabled).
	 */
	if (pm_runtime_suspended(&drv_data->pdev->dev))
		return IRQ_NONE;

	/*
	 * If the device is not yet in RPM suspended state and we get an
	 * interrupt that is meant for another device, check if status bits
	 * are all set to one. That means that the device is already
	 * powered off.
	 */
	status = pxa2xx_spi_read(drv_data, SSSR);
	if (status == ~0)
		return IRQ_NONE;

	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	/* Ignore RX timeout interrupt if it is disabled */
	if (!(sccr1_reg & SSCR1_TINTE))
		mask &= ~SSSR_TINT;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		pxa2xx_spi_write(drv_data, SSCR0,
				 pxa2xx_spi_read(drv_data, SSCR0)
				 & ~SSCR0_SSE);
		pxa2xx_spi_write(drv_data, SSCR1,
				 pxa2xx_spi_read(drv_data, SSCR1)
				 & ~drv_data->int_cr1);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, 0);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev,
			"bad message state in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

/*
 * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
 * input frequency by fractions of 2^24. It also has a divider by 5.
 *
 * There are formulas to get baud rate value for given input frequency and
 * divider parameters, such as DDS_CLK_RATE and SCR:
 *
 * Fsys = 200MHz
 *
 * Fssp = Fsys * DDS_CLK_RATE / 2^24			(1)
 * Baud rate = Fsclk = Fssp / (2 * (SCR + 1))		(2)
 *
 * DDS_CLK_RATE either 2^n or 2^n / 5.
 * SCR is in range 0 .. 255
 *
 * Divisor = 5^i * 2^j * 2 * k
 *       i = [0, 1]      i = 1 iff j = 0 or j > 3
 *       j = [0, 23]     j = 0 iff i = 1
 *       k = [1, 256]
 * Special case: j = 0, i = 1: Divisor = 2 / 5
 *
 * Accordingly to the specification the recommended values for DDS_CLK_RATE
 * are:
 *	Case 1:		2^n, n = [0, 23]
 *	Case 2:		2^24 * 2 / 5 (0x666666)
 *	Case 3:		less than or equal to 2^24 / 5 / 16 (0x33333)
 *
 * In all cases the lowest possible value is better.
 *
 * The function calculates parameters for all cases and chooses the one closest
 * to the asked baud rate.
 */
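/*
 * Illustrative arithmetic for the formulas above (example numbers only):
 * with DDS_CLK_RATE = 0x666666 (~2^24 * 2 / 5), (1) gives
 * Fssp ~= 200 MHz * 2 / 5 = 80 MHz, and SCR = 39 in (2) then gives a baud
 * rate of 80 MHz / (2 * (39 + 1)) = 1 MHz.
 */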
static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
{
	unsigned long xtal = 200000000;
	unsigned long fref = xtal / 2;		/* mandatory division by 2,
						   see (2) */
						/* case 3 */
	unsigned long fref1 = fref / 2;		/* case 1 */
	unsigned long fref2 = fref * 2 / 5;	/* case 2 */
	unsigned long scale;
	unsigned long q, q1, q2;
	long r, r1, r2;
	u32 mul;

	/* Case 1 */

	/* Set initial value for DDS_CLK_RATE */
	mul = (1 << 24) >> 1;

	/* Calculate initial quot */
	q1 = DIV_ROUND_UP(fref1, rate);

	/* Scale q1 if it's too big */
	if (q1 > 256) {
		/* Scale q1 to range [1, 512] */
		scale = fls_long(q1 - 1);
		if (scale > 9) {
			q1 >>= scale - 9;
			mul >>= scale - 9;
		}

		/* Round the result if we have a remainder */
		q1 += q1 & 1;
	}

	/* Decrease DDS_CLK_RATE as much as we can without loss in precision */
	scale = __ffs(q1);
	q1 >>= scale;
	mul >>= scale;

	/* Get the remainder */
	r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);

	/* Case 2 */

	q2 = DIV_ROUND_UP(fref2, rate);
	r2 = abs(fref2 / q2 - rate);

	/*
	 * Choose the best between two: less remainder we have the better. We
	 * can't go case 2 if q2 is greater than 256 since SCR register can
	 * hold only values 0 .. 255.
	 */
	if (r2 >= r1 || q2 > 256) {
		/* case 1 is better */
		r = r1;
		q = q1;
	} else {
		/* case 2 is better */
		r = r2;
		q = q2;
		mul = (1 << 24) * 2 / 5;
	}

	/* Check case 3 only if the divisor is big enough */
	if (fref / rate >= 80) {
		u64 fssp;
		u32 m;

		/* Calculate initial quot */
		q1 = DIV_ROUND_UP(fref, rate);
		m = (1 << 24) / q1;

		/* Get the remainder */
		fssp = (u64)fref * m;
		do_div(fssp, 1 << 24);
		r1 = abs(fssp - rate);

		/* Choose this one if it suits better */
		if (r1 < r) {
			/* case 3 is better */
			q = 1;
			mul = m;
		}
	}

	*dds = mul;
	return q - 1;
}

static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
	unsigned long ssp_clk = drv_data->master->max_speed_hz;
	const struct ssp_device *ssp = drv_data->ssp;

	rate = min_t(int, ssp_clk, rate);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return (ssp_clk / (2 * rate) - 1) & 0xff;
	else
		return (ssp_clk / rate - 1) & 0xfff;
}
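
/*
 * Example of the divisor arithmetic above (illustrative numbers): a 10 MHz
 * request with a 100 MHz ssp_clk returns (100000000 / 10000000 - 1) = 9 on
 * most ports, or (100000000 / (2 * 10000000) - 1) = 4 on PXA25x/CE4100.
 */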

static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
					   int rate)
{
	struct chip_data *chip = drv_data->cur_chip;
	unsigned int clk_div;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
		break;
	default:
		clk_div = ssp_get_clk_div(drv_data, rate);
		break;
	}
	return clk_div << 8;
}
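
/*
 * The divider is returned pre-shifted by 8 so that it lands in the SCR
 * field of SSCR0; pump_transfers() masks it back out with
 * (cr0 & SSCR0_SCR(...)) >> 8 when logging the actual transfer speed.
 */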

static bool pxa2xx_spi_can_dma(struct spi_master *master,
			       struct spi_device *spi,
			       struct spi_transfer *xfer)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	return chip->enable_dma &&
	       xfer->len <= MAX_DMA_LEN &&
	       xfer->len >= chip->dma_burst_size;
}

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_master *master = drv_data->master;
	struct spi_message *message;
	struct spi_transfer *transfer;
	struct spi_transfer *previous;
	struct chip_data *chip;
	u32 clk_div;
	u8 bits;
	u32 speed;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;
	u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
	int err;
	int dma_mapped;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check if we can DMA this transfer */
	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length of "
				"%u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		dev_warn_ratelimited(&message->spi->dev,
				     "pump_transfers: DMA disabled for transfer length %ld "
				     "greater than %d\n",
				     (long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (pxa2xx_spi_flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->len = transfer->len;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bit per word on a per transfer */
	bits = transfer->bits_per_word;
	speed = transfer->speed_hz;

	clk_div = pxa2xx_ssp_get_clk_div(drv_data, speed);

	if (bits <= 8) {
		drv_data->n_bytes = 1;
		drv_data->read = drv_data->read != null_reader ?
					u8_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u8_writer : null_writer;
	} else if (bits <= 16) {
		drv_data->n_bytes = 2;
		drv_data->read = drv_data->read != null_reader ?
					u16_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u16_writer : null_writer;
	} else if (bits <= 32) {
		drv_data->n_bytes = 4;
		drv_data->read = drv_data->read != null_reader ?
					u32_reader : null_reader;
		drv_data->write = drv_data->write != null_writer ?
					u32_writer : null_writer;
	}
	/*
	 * If bits/word is changed in DMA mode, then the thresholds and
	 * burst size must be checked as well.
	 */
	if (chip->enable_dma) {
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
						message->spi,
						bits, &dma_burst,
						&dma_thresh))
			dev_warn_ratelimited(&message->spi->dev,
					     "pump_transfers: DMA burst size reduced to match bits_per_word\n");
	}

	message->state = RUNNING_STATE;

	dma_mapped = master->can_dma &&
		     master->can_dma(master, message->spi, transfer) &&
		     master->cur_msg_mapped;
	if (dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;

		err = pxa2xx_spi_dma_prepare(drv_data, dma_burst);
		if (err) {
			message->status = err;
			giveback(drv_data);
			return;
		}

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);

		pxa2xx_spi_dma_start(drv_data);
	} else {
		/* Ensure we have the correct interrupt handler	*/
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status  */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	/* NOTE:  PXA25x_SSP _could_ use external clocking ... */
	cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
	if (!pxa25x_ssp_comp(drv_data))
		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
			master->max_speed_hz
				/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
			dma_mapped ? "DMA" : "PIO");
	else
		dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
			master->max_speed_hz / 2
				/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
			dma_mapped ? "DMA" : "PIO");

	if (is_lpss_ssp(drv_data)) {
		if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
		    != chip->lpss_rx_threshold)
			pxa2xx_spi_write(drv_data, SSIRF,
					 chip->lpss_rx_threshold);
		if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
		    != chip->lpss_tx_threshold)
			pxa2xx_spi_write(drv_data, SSITF,
					 chip->lpss_tx_threshold);
	}

	if (is_quark_x1000_ssp(drv_data) &&
	    (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
		pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);

	/* see if we need to reload the config registers */
	if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
	    || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
	    != (cr1 & change_mask)) {
		/* stop the SSP, and update the other bits */
		pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
		/* first set CR1 without interrupt and service enables */
		pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
		/* restart the SSP */
		pxa2xx_spi_write(drv_data, SSCR0, cr0);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	pxa2xx_spi_write(drv_data, SSCR1, cr1);
}

static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
					   struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	drv_data->cur_msg = msg;
	/* Initial message state*/
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
	return 0;
}

static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	/* Disable the SSP now */
	pxa2xx_spi_write(drv_data, SSCR0,
			 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);

	return 0;
}

static int setup_cs(struct spi_device *spi, struct chip_data *chip,
		    struct pxa2xx_spi_chip *chip_info)
{
	int err = 0;

	if (chip == NULL || chip_info == NULL)
		return 0;

	/* NOTE: setup() can be called multiple times, possibly with
	 * different chip_info, release previously requested GPIO
	 */
	if (gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	/* If (*cs_control) is provided, ignore GPIO chip select */
	if (chip_info->cs_control) {
		chip->cs_control = chip_info->cs_control;
		return 0;
	}

	if (gpio_is_valid(chip_info->gpio_cs)) {
		err = gpio_request(chip_info->gpio_cs, "SPI_CS");
		if (err) {
			dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
				chip_info->gpio_cs);
			return err;
		}

		chip->gpio_cs = chip_info->gpio_cs;
		chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;

		err = gpio_direction_output(chip->gpio_cs,
					!chip->gpio_cs_inverted);
	}

	return err;
}

static int setup(struct spi_device *spi)
{
	struct pxa2xx_spi_chip *chip_info;
	struct chip_data *chip;
	const struct lpss_config *config;
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	uint tx_thres, tx_hi_thres, rx_thres;

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tx_thres = TX_THRESH_QUARK_X1000_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_QUARK_X1000_DFLT;
		break;
	case LPSS_LPT_SSP:
	case LPSS_BYT_SSP:
	case LPSS_BSW_SSP:
	case LPSS_SPT_SSP:
	case LPSS_BXT_SSP:
		config = lpss_get_config(drv_data);
		tx_thres = config->tx_threshold_lo;
		tx_hi_thres = config->tx_threshold_hi;
		rx_thres = config->rx_threshold;
		break;
	default:
		tx_thres = TX_THRESH_DFLT;
		tx_hi_thres = 0;
		rx_thres = RX_THRESH_DFLT;
		break;
	}

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		if (drv_data->ssp_type == CE4100_SSP) {
			if (spi->chip_select > 4) {
				dev_err(&spi->dev,
					"failed setup: cs number must not be > 4.\n");
				kfree(chip);
				return -EINVAL;
			}

			chip->frm = spi->chip_select;
		} else
			chip->gpio_cs = -1;
		chip->enable_dma = drv_data->master_info->enable_dma;
		chip->timeout = TIMOUT_DFLT;
	}

	/* protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it */
	chip_info = spi->controller_data;

	/* chip_info isn't always needed */
	chip->cr1 = 0;
	if (chip_info) {
		if (chip_info->timeout)
			chip->timeout = chip_info->timeout;
		if (chip_info->tx_threshold)
			tx_thres = chip_info->tx_threshold;
		if (chip_info->tx_hi_threshold)
			tx_hi_thres = chip_info->tx_hi_threshold;
		if (chip_info->rx_threshold)
			rx_thres = chip_info->rx_threshold;
		chip->dma_threshold = 0;
		if (chip_info->enable_loopback)
			chip->cr1 = SSCR1_LBM;
	}

	chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
	chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
				| SSITF_TxHiThresh(tx_hi_thres);

	/* set dma burst and threshold outside of chip_info path so that if
	 * chip_info goes away after setting chip->enable_dma, the
	 * burst and threshold can still respond to changes in bits_per_word */
	if (chip->enable_dma) {
		/* set up legal burst and threshold for dma */
		if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
						spi->bits_per_word,
						&chip->dma_burst_size,
						&chip->dma_threshold)) {
			dev_warn(&spi->dev,
				 "in setup: DMA burst size reduced to match bits_per_word\n");
		}
	}

	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
				   & QUARK_X1000_SSCR1_RFT)
				   | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
				   & QUARK_X1000_SSCR1_TFT);
		break;
	default:
		chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
			(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
		break;
	}

	chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
	chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
			| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);

	if (spi->mode & SPI_LOOP)
		chip->cr1 |= SSCR1_LBM;

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else if (spi->bits_per_word <= 32) {
		chip->n_bytes = 4;
		chip->read = u32_reader;
		chip->write = u32_writer;
	}

	spi_set_ctldata(spi, chip);

	if (drv_data->ssp_type == CE4100_SSP)
		return 0;

	return setup_cs(spi, chip, chip_info);
}

static void cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);

	if (!chip)
		return;

	if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
		gpio_free(chip->gpio_cs);

	kfree(chip);
}

#ifdef CONFIG_PCI
#ifdef CONFIG_ACPI

static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
	{ "INT33C0", LPSS_LPT_SSP },
	{ "INT33C1", LPSS_LPT_SSP },
	{ "INT3430", LPSS_LPT_SSP },
	{ "INT3431", LPSS_LPT_SSP },
	{ "80860F0E", LPSS_BYT_SSP },
	{ "8086228E", LPSS_BSW_SSP },
	{ },
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);

static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
	unsigned int devid;
	int port_id = -1;

	if (adev && adev->pnp.unique_id &&
	    !kstrtouint(adev->pnp.unique_id, 0, &devid))
		port_id = devid;
	return port_id;
}
#else /* !CONFIG_ACPI */
static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
	return -1;
}
#endif

/*
 * PCI IDs of compound devices that integrate both host controller and private
 * integrated DMA engine. Please note these are not used in module
 * autoloading and probing in this module but matching the LPSS SSP type.
 */
static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
	/* SPT-LP */
	{ PCI_VDEVICE(INTEL, 0x9d29), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0x9d2a), LPSS_SPT_SSP },
	/* SPT-H */
	{ PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
	/* KBL-H */
	{ PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP },
	{ PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP },
	/* BXT A-Step */
	{ PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
	/* BXT B-Step */
	{ PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
	/* APL */
	{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
	{ PCI_VDEVICE(INTEL, 0x5ac6), LPSS_BXT_SSP },
	{ },
};

static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
	struct device *dev = param;

	if (dev != chan->device->dev->parent)
		return false;

	return true;
}

static struct pxa2xx_spi_master *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
	struct pxa2xx_spi_master *pdata;
	struct acpi_device *adev;
	struct ssp_device *ssp;
	struct resource *res;
	const struct acpi_device_id *adev_id = NULL;
	const struct pci_device_id *pcidev_id = NULL;
	int type;

	adev = ACPI_COMPANION(&pdev->dev);

	if (dev_is_pci(pdev->dev.parent))
		pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
					 to_pci_dev(pdev->dev.parent));
	else if (adev)
		adev_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
	else
		return NULL;

	if (adev_id)
		type = (int)adev_id->driver_data;
	else if (pcidev_id)
		type = (int)pcidev_id->driver_data;
	else
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return NULL;

	ssp = &pdata->ssp;

	ssp->phys_base = res->start;
	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return NULL;

	if (pcidev_id) {
		pdata->tx_param = pdev->dev.parent;
		pdata->rx_param = pdev->dev.parent;
		pdata->dma_filter = pxa2xx_spi_idma_filter;
	}

	ssp->clk = devm_clk_get(&pdev->dev, NULL);
	ssp->irq = platform_get_irq(pdev, 0);
	ssp->type = type;
	ssp->pdev = pdev;
	ssp->port_id = pxa2xx_spi_get_port_id(adev);

	pdata->num_chipselect = 1;
	pdata->enable_dma = true;

	return pdata;
}

#else /* !CONFIG_PCI */
static inline struct pxa2xx_spi_master *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
	return NULL;
}
#endif

static int pxa2xx_spi_fw_translate_cs(struct spi_master *master, unsigned cs)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	if (has_acpi_companion(&drv_data->pdev->dev)) {
		switch (drv_data->ssp_type) {
		/*
		 * For Atoms the ACPI DeviceSelection used by the Windows
		 * driver starts from 1 instead of 0 so translate it here
		 * to match what Linux expects.
		 */
		case LPSS_BYT_SSP:
		case LPSS_BSW_SSP:
			return cs - 1;

		default:
			break;
		}
	}

	return cs;
}
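
/*
 * For example, with the translation above an ACPI DeviceSelection of 1 on a
 * Bay Trail or Braswell SSP is reported to the SPI core as chip select 0;
 * all other SSP types pass the value through unchanged.
 */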

static int pxa2xx_spi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pxa2xx_spi_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data;
	struct ssp_device *ssp;
	const struct lpss_config *config;
	int status;
	u32 tmp;

	platform_info = dev_get_platdata(dev);
	if (!platform_info) {
		platform_info = pxa2xx_spi_init_pdata(pdev);
		if (!platform_info) {
			dev_err(&pdev->dev, "missing platform data\n");
			return -ENODEV;
		}
	}

	ssp = pxa_ssp_request(pdev->id, pdev->name);
	if (!ssp)
		ssp = &platform_info->ssp;

	if (!ssp->mmio_base) {
		dev_err(&pdev->dev, "failed to get ssp\n");
		return -ENODEV;
	}

	master = spi_alloc_master(dev, sizeof(struct driver_data));
	if (!master) {
		dev_err(&pdev->dev, "cannot alloc spi_master\n");
		pxa_ssp_free(ssp);
		return -ENOMEM;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;
	drv_data->ssp = ssp;

	master->dev.of_node = pdev->dev.of_node;
	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;

	master->bus_num = ssp->port_id;
	master->dma_alignment = DMA_ALIGNMENT;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer_one_message = pxa2xx_spi_transfer_one_message;
	master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
	master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
	master->auto_runtime_pm = true;
	master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;

	drv_data->ssp_type = ssp->type;

	drv_data->ioaddr = ssp->mmio_base;
	drv_data->ssdr_physical = ssp->phys_base + SSDR;
	if (pxa25x_ssp_comp(drv_data)) {
		switch (drv_data->ssp_type) {
		case QUARK_X1000_SSP:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
			break;
		default:
			master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
			break;
		}

		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
		drv_data->dma_cr1 = 0;
		drv_data->clear_sr = SSSR_ROR;
		drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
	} else {
		master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
		drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
		drv_data->dma_cr1 = DEFAULT_DMA_CR1;
		drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
		drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
	}

	status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
			drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
		goto out_error_master_alloc;
	}

	/* Setup DMA if requested */
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_dbg(dev, "no DMA channels available, using PIO\n");
			platform_info->enable_dma = false;
		} else {
			master->can_dma = pxa2xx_spi_can_dma;
		}
	}

	/* Enable SOC clock */
	clk_prepare_enable(ssp->clk);

	master->max_speed_hz = clk_get_rate(ssp->clk);

	/* Load default SSP configuration */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	switch (drv_data->ssp_type) {
	case QUARK_X1000_SSP:
		tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
		      | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);

		/* Use the Motorola SPI protocol and 8 bit frames */
		pxa2xx_spi_write(drv_data, SSCR0,
				 QUARK_X1000_SSCR0_Motorola
				 | QUARK_X1000_SSCR0_DataSize(8));
		break;
	default:
		tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
		      SSCR1_TxTresh(TX_THRESH_DFLT);
		pxa2xx_spi_write(drv_data, SSCR1, tmp);
		tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
		pxa2xx_spi_write(drv_data, SSCR0, tmp);
		break;
	}

	if (!pxa25x_ssp_comp(drv_data))
		pxa2xx_spi_write(drv_data, SSTO, 0);

	if (!is_quark_x1000_ssp(drv_data))
		pxa2xx_spi_write(drv_data, SSPSP, 0);

	if (is_lpss_ssp(drv_data)) {
		lpss_ssp_setup(drv_data);
		config = lpss_get_config(drv_data);
		if (config->reg_capabilities >= 0) {
			tmp = __lpss_ssp_read_priv(drv_data,
						   config->reg_capabilities);
			tmp &= LPSS_CAPS_CS_EN_MASK;
			tmp >>= LPSS_CAPS_CS_EN_SHIFT;
			platform_info->num_chipselect = ffz(tmp);
		} else if (config->cs_num) {
			platform_info->num_chipselect = config->cs_num;
		}
	}
	master->num_chipselect = platform_info->num_chipselect;

	tasklet_init(&drv_data->pump_transfers, pump_transfers,
		     (unsigned long)drv_data);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = devm_spi_register_master(&pdev->dev, master);
	if (status != 0) {
		dev_err(&pdev->dev, "problem registering spi master\n");
		goto out_error_clock_enabled;
	}

	return status;

out_error_clock_enabled:
	clk_disable_unprepare(ssp->clk);
	pxa2xx_spi_dma_release(drv_data);
	free_irq(ssp->irq, drv_data);

out_error_master_alloc:
	spi_master_put(master);
	pxa_ssp_free(ssp);
	return status;
}

static int pxa2xx_spi_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	struct ssp_device *ssp;

	if (!drv_data)
		return 0;
	ssp = drv_data->ssp;

	pm_runtime_get_sync(&pdev->dev);

	/* Disable the SSP at the peripheral and SOC level */
	pxa2xx_spi_write(drv_data, SSCR0, 0);
	clk_disable_unprepare(ssp->clk);

	/* Release DMA */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	/* Release IRQ */
	free_irq(ssp->irq, drv_data);

	/* Release SSP */
	pxa_ssp_free(ssp);

	return 0;
}

static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
	int status = 0;

	if ((status = pxa2xx_spi_remove(pdev)) != 0)
		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}

#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status;

	status = spi_master_suspend(drv_data->master);
	if (status != 0)
		return status;
	pxa2xx_spi_write(drv_data, SSCR0, 0);

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(ssp->clk);

	return 0;
}

static int pxa2xx_spi_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);
	struct ssp_device *ssp = drv_data->ssp;
	int status;

	/* Enable the SSP clock */
	if (!pm_runtime_suspended(dev))
		clk_prepare_enable(ssp->clk);

	/* Restore LPSS private register bits */
	if (is_lpss_ssp(drv_data))
		lpss_ssp_setup(drv_data);

	/* Start the queue running */
	status = spi_master_resume(drv_data->master);
	if (status != 0) {
		dev_err(dev, "problem starting queue (%d)\n", status);
		return status;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM
static int pxa2xx_spi_runtime_suspend(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_disable_unprepare(drv_data->ssp->clk);
	return 0;
}

static int pxa2xx_spi_runtime_resume(struct device *dev)
{
	struct driver_data *drv_data = dev_get_drvdata(dev);

	clk_prepare_enable(drv_data->ssp->clk);
	return 0;
}
#endif

static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
	SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
			   pxa2xx_spi_runtime_resume, NULL)
};

static struct platform_driver driver = {
	.driver = {
		.name	= "pxa2xx-spi",
		.pm	= &pxa2xx_spi_pm_ops,
		.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
	},
	.probe = pxa2xx_spi_probe,
	.remove = pxa2xx_spi_remove,
	.shutdown = pxa2xx_spi_shutdown,
};

static int __init pxa2xx_spi_init(void)
{
	return platform_driver_register(&driver);
}
subsys_initcall(pxa2xx_spi_init);

static void __exit pxa2xx_spi_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(pxa2xx_spi_exit);