/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define	CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE   	(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK           (0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK       (0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY                (0x1 << 12)
#define NDSR_FLASH_RDY          (0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN         (0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info         *mtd;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	unsigned int 		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t 		data_buff_phys;
	int 			data_dma_ch;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int 			retcode;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
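/*
 * Usage note (illustrative, not taken from the original sources): DMA can
 * be disabled at module load time, e.g. "modprobe pxa3xx_nand use_dma=0",
 * or via "pxa3xx_nand.use_dma=0" on the kernel command line when built in.
 */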

struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
	uint32_t	chip_id;
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
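/*
 * Worked example (illustrative clock rate): with a 156 MHz controller
 * clock, ns2cycle(40, 156000000) = (40 * 156) / 1000 = 6 cycles; the
 * integer division truncates any sub-cycle remainder.
 */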

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	if (!of_id)
		return PXA3XX_NAND_VARIANT_PXA;
	return (enum pxa3xx_nand_variant)of_id->data;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

	chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);

	id = chip->read_byte(host->mtd);
	id |= chip->read_byte(host->mtd) << 0x8;

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}

static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;

	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}

static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
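/*
 * Example (illustrative, using the 2048-byte page / 1-bit ECC values set
 * up in pxa_ecc_init() below, i.e. spare_size = 40 and ecc_size = 24):
 * a READ0 with spare enabled transfers 2048 + 40 bytes when ECC is on,
 * and 2048 + 40 + 24 bytes when ECC is off.
 */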
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}

/*
 * NOTE: ND_RUN must be set first, and only then the command buffer
 * written; otherwise the controller does not start.
 * We enable all interrupts at the same time and let
 * pxa3xx_nand_irq() handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
604 605 606
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

L
	nand_writel(info, NDSR, NDSR_MASK);
}

static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;
			len -= 8;
		}
	}

	ioread32_rep(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}

static void pxa3xx_nand_data_dma_irq(void *data)
{
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;

	switch (info->state) {
	case STATE_DMA_WRITING:
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		break;
	case STATE_DMA_READING:
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}
	info->sg.length = info->data_size +
		(info->oob_size ? info->spare_size + info->ecc_size : 0);
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
	}
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
}

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

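/*
 * Example (hypothetical values): for a large-page device with
 * page_addr = 0x12345 and column = 0, the large-page branch below yields
 * ndcb1 = 0x23450000 (low 16 page-address bits in the upper half-word,
 * column in the lower half-word) and ndcb2 = 0x01 (page-address bits
 * 16-23).
 */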
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

890
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
E
eric miao 已提交
891
{
892 893 894
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

895
	/* reset data and oob column point to handle data */
896 897
	info->buf_start		= 0;
	info->buf_count		= 0;
898
	info->oob_size		= 0;
899 900
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
901
	info->use_ecc		= 0;
902
	info->use_spare		= 1;
903
	info->retcode		= ERR_NONE;
904
	info->ecc_err_cnt	= 0;
905
	info->ndcb3		= 0;
906
	info->need_wait		= 0;
E
eric miao 已提交
907 908

	switch (command) {
909 910 911
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
E
eric miao 已提交
912
	case NAND_CMD_READOOB:
913
		pxa3xx_set_datasize(info, mtd);
E
eric miao 已提交
914
		break;
915 916 917
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
918 919 920 921 922
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}
923 924 925 926 927 928 929 930 931 932 933 934 935

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

936 937 938
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
939
		int ext_cmd_type, uint16_t column, int page_addr)
940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;
957

958 959
	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);
E
eric miao 已提交
960

961 962
	switch (command) {
	case NAND_CMD_READOOB:
E
eric miao 已提交
963
	case NAND_CMD_READ0:
964 965 966 967 968
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

969
		if (command == NAND_CMD_READOOB)
970
			info->buf_start += mtd->writesize;
971

972 973 974 975 976 977
		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
978
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
979 980 981 982 983 984 985
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}
E
eric miao 已提交
986

987 988 989
		set_command_address(info, mtd->writesize, column, page_addr);
		break;

E
eric miao 已提交
990
	case NAND_CMD_SEQIN:
991

992 993
		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);
994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
E
eric miao 已提交
1008
		break;
1009

E
eric miao 已提交
1010
	case NAND_CMD_PAGEPROG:
1011 1012 1013 1014 1015
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}
E
eric miao 已提交
1016

1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050
		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * A different NAND chip may be hooked to each chip select, so
	 * check whether the chip select has changed and, if so, reload
	 * the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * A different NAND chip may be hooked to each chip select, so
	 * check whether the chip select has changed and, if so, reload
	 * the timing registers.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), the HW calculates its ECC as
		 * 0, which differs from the ECC information stored in the
		 * OOB area; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just send a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct nand_sdr_timings *timings;

	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;
	struct nand_chip *chip = mtd->priv;

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}

static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;

	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
		return 0;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}

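/*
 * Example (derived from the cases below): a 4 KiB page requiring 4-bit
 * correction per 512 bytes is handled as two 2048-byte chunks, each with
 * 32 spare and 32 ECC bytes and protected by a BCH code correcting up to
 * 16 bits per chunk (ecc->size = 2048, ecc->strength = 16).
 */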
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
	}

	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	struct resource *r;
	int ret, irq, cs;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata->num_cs <= 0)
		return -ENODEV;
	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;
	info->variant = pxa3xx_nand_get_variant(pdev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
		chip = (struct nand_chip *)(&mtd[1]);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->mtd = mtd;
		host->cs = cs;
		host->info_data = info;
		mtd->priv = chip;
		mtd->dev.parent = &pdev->dev;
		/* FIXME: all chips use the same device tree partitions */
		nand_set_flash_node(chip, np);

		chip->priv = host;
		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller        = &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
		chip->options		|= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc		= nand_cmdfunc;
	}

	spin_lock_init(&chip->controller->lock);
	init_waitqueue_head(&chip->controller->wq);
	info->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(info->clk)) {
		dev_err(&pdev->dev, "failed to get nand clock\n");
		return PTR_ERR(info->clk);
	}
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;

	if (use_dma) {
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for data DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_dat = r->start;

		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for cmd DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
		}
		info->drcmr_cmd = r->start;
E
eric miao 已提交
1759 1760 1761 1762 1763 1764
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
1765
		goto fail_disable_clk;
E
eric miao 已提交
1766 1767 1768
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1769 1770 1771
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
1772
		goto fail_disable_clk;
E
eric miao 已提交
1773
	}
1774
	info->mmio_phys = r->start;
E
eric miao 已提交
1775

1776 1777 1778 1779 1780
	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
1781
		goto fail_disable_clk;
1782
	}
E
eric miao 已提交
1783

1784 1785 1786
	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

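	/*
	 * All interrupt sources were masked above, so the handler stays idle
	 * until a command is issued.
	 */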
	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto fail_free_buf;
	}

	platform_set_drvdata(pdev, info);

	return 0;

fail_free_buf:
	free_irq(irq, info);
	kfree(info->data_buff);
fail_disable_clk:
	clk_disable_unprepare(info->clk);
	return ret;
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to the SMC upon driver removal. This is done
	 * by setting the x_ARB_CNTL bit, which also prevents the NAND
	 * controller from accessing the bus at all afterwards.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

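	/* Tear down the MTD devices that were registered at probe time */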
	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}

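/*
 * Illustrative (not exhaustive) device tree node for this driver, using the
 * property names parsed below; the compatible string must be one of the
 * entries in pxa3xx_nand_dt_ids, e.g. "marvell,pxa3xx-nand". The generic
 * nand-* properties are read through the of_get_nand_*() helpers.
 *
 *	nand-controller {
 *		compatible = "marvell,pxa3xx-nand";
 *		num-cs = <1>;
 *		marvell,nand-enable-arbiter;
 *		marvell,nand-keep-config;
 *		nand-on-flash-bbt;
 *		nand-ecc-strength = <4>;
 *		nand-ecc-step-size = <512>;
 *	};
 */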
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);

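	/*
	 * ECC strength and step size are optional; a missing property is
	 * recorded as 0 so that the ECC init code falls back to its defaults.
	 */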
	pdata->ecc_strength = of_get_nand_ecc_strength(np);
	if (pdata->ecc_strength < 0)
		pdata->ecc_strength = 0;

	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
	if (pdata->ecc_step_size < 0)
		pdata->ecc_step_size = 0;

	pdev->dev.platform_data = pdata;

	return 0;
}

static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

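	/* DMA transfers are only wired up on the ARM PXA/MMP platforms */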
	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed, otherwise the
		 * user's mtd partition configuration would break.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				cs);
			continue;
		}

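		/* Register the mtd device along with any partitions from platform data */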
		ret = mtd_device_register(mtd, pdata->parts[cs],
					  pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);

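	/* Refuse to suspend while a command is still in flight */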
	if (info->state) {
		dev_err(dev, "driver busy, state = %d\n", info->state);
		return -EAGAIN;
	}

	clk_disable(info->clk);
	return 0;
}

static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* Don't service interrupts unless an mtd operation is in progress */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Set the chip select to an invalid value; the driver will then
	 * reprogram the timings for the current chip select at the
	 * beginning of cmdfunc.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, NDSR is updated to 0x1800 when the nand clock is
	 * disabled and re-enabled. Clear all status bits before resuming so
	 * that this does not confuse the driver's state machine.
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");