pxa3xx_nand.c 55.2 KB
Newer Older
E
eric miao 已提交
1 2 3 4 5 6 7 8 9
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

14
#include <linux/kernel.h>
E
eric miao 已提交
15 16 17
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
18
#include <linux/dmaengine.h>
E
eric miao 已提交
19
#include <linux/dma-mapping.h>
20
#include <linux/dma/pxa-dma.h>
E
eric miao 已提交
21 22 23
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
24
#include <linux/mtd/rawnand.h>
E
eric miao 已提交
25
#include <linux/mtd/partitions.h>
26
#include <linux/io.h>
27
#include <linux/iopoll.h>
28
#include <linux/irq.h>
29
#include <linux/slab.h>
30 31
#include <linux/of.h>
#include <linux/of_device.h>
32
#include <linux/platform_data/mtd-nand-pxa3xx.h>
33 34
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
E
eric miao 已提交
35

36 37
#define	CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
38
#define PAGE_CHUNK_SIZE		(2048)
E
eric miao 已提交
39

40 41
/*
 * Define a buffer size for the initial command that detects the flash device:
42 43 44 45 46
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
47
 */
48
#define INIT_BUFFER_SIZE	2048
49

50 51 52 53
/* System control register and bit to enable NAND on some SoCs */
#define GENCONF_SOC_DEVICE_MUX	0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)

E
eric miao 已提交
54 55 56 57 58 59 60 61
/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
62
#define NDECCCTRL	(0x28) /* ECC control */
E
eric miao 已提交
63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE   	(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
79 80
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
E
eric miao 已提交
81 82 83 84 85 86
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
L
Lei Wen 已提交
87
#define NDCR_INT_MASK           (0xFFF)
E
eric miao 已提交
88 89

#define NDSR_MASK		(0xfff)
90 91 92
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK       (0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
L
Lei Wen 已提交
93 94
#define NDSR_RDY                (0x1 << 12)
#define NDSR_FLASH_RDY          (0x1 << 11)
E
eric miao 已提交
95 96 97 98 99 100
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
101 102
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
E
eric miao 已提交
103 104 105 106
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

107
#define NDCB0_LEN_OVRD		(0x1 << 28)
108
#define NDCB0_ST_ROW_EN         (0x1 << 26)
E
eric miao 已提交
109 110
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
111 112
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
E
eric miao 已提交
113 114 115 116 117 118 119 120 121 122
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

123 124 125 126 127 128 129 130
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

131 132 133 134 135 136 137
/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

E
eric miao 已提交
138
/* macros for registers read/write */
139 140 141 142 143 144 145
#define nand_writel(info, off, val)					\
	do {								\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
			 __func__, __LINE__, (val), (off));		\
		writel_relaxed((val), (info)->mmio_base + (off));	\
	} while (0)
E
eric miao 已提交
146

147 148 149 150 151 152 153 154 155
#define nand_readl(info, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((info)->mmio_base + (off));		\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
			 __func__, __LINE__, (off), _v);		\
		_v;							\
	})
E
eric miao 已提交
156 157 158 159 160 161

/* Error codes reported back through info->retcode */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error during a transfer */
	ERR_SENDCMD	= -2,	/* failed to issue a command */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error */
};

/* Driver state machine, tracked in info->state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

180 181 182
/* Supported controller flavours (selected via DT compatible string) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
	PXA3XX_NAND_VARIANT_ARMADA_8K,
};

186 187 188 189 190 191
/* Per chip-select state: one instance per attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;	/* back-pointer to pxa3xx_nand_info */

	/* use hardware ECC for this chip? */
	int			use_ecc;
	/* chip-select line this chip is wired to */
	int			cs;

	/* address cycle counts, calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
200
	struct nand_hw_control	controller;
E
eric miao 已提交
201 202 203 204
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
205
	unsigned long		mmio_phys;
206
	struct completion	cmd_complete, dev_ready;
E
eric miao 已提交
207 208 209

	unsigned int 		buf_start;
	unsigned int		buf_count;
210
	unsigned int		buf_size;
211 212
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;
E
eric miao 已提交
213 214

	/* DMA information */
215 216 217 218
	struct scatterlist	sg;
	enum dma_data_direction	dma_dir;
	struct dma_chan		*dma_chan;
	dma_cookie_t		dma_cookie;
E
eric miao 已提交
219 220 221
	int			drcmr_dat;

	unsigned char		*data_buff;
222
	unsigned char		*oob_buff;
E
eric miao 已提交
223 224 225
	dma_addr_t 		data_buff_phys;
	int 			data_dma_ch;

226
	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
E
eric miao 已提交
227 228
	unsigned int		state;

229 230 231 232 233 234
	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

235
	int			cs;
E
eric miao 已提交
236
	int			use_ecc;	/* use HW ECC ? */
237
	int			ecc_bch;	/* using BCH ECC? */
E
eric miao 已提交
238
	int			use_dma;	/* use DMA ? */
239
	int			use_spare;	/* use spare ? */
240
	int			need_wait;
E
eric miao 已提交
241

242 243 244 245
	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
246
	unsigned int		spare_size;
247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int            nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int            ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

264
	unsigned int		ecc_size;
265 266
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
E
eric miao 已提交
267 268
	int 			retcode;

269 270 271 272 273 274 275 276 277 278 279
	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size is the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int            cur_chunk;

280 281 282 283 284
	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

E
eric miao 已提交
285 286 287 288
	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
289
	uint32_t		ndcb3;
E
eric miao 已提交
290 291
};

292
static bool use_dma = 1;
E
eric miao 已提交
293
module_param(use_dma, bool, 0444);
L
Lucas De Marchi 已提交
294
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
E
eric miao 已提交
295

296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314
/* Raw NAND interface timing parameters, all values in nanoseconds */
struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};

/* Static description of a known flash chip for the non-ONFI path */
struct pxa3xx_nand_flash {
	uint32_t	chip_id;	/* READID bytes: mfr in low byte, device in high byte */
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};

315
/* Timing sets referenced by builtin_flash_types[] (values in ns) */
static struct pxa3xx_nand_timing timing[] = {
	{ .tCH = 40, .tCS = 80, .tWH = 60, .tWP = 100,
	  .tRH = 80, .tRP = 100, .tR = 90000, .tWHR = 400, .tAR = 40, },
	{ .tCH = 10, .tCS =  0, .tWH = 20, .tWP =  40,
	  .tRH = 30, .tRP =  40, .tR = 11123, .tWHR = 110, .tAR = 10, },
	{ .tCH = 10, .tCS = 25, .tWH = 15, .tWP =  25,
	  .tRH = 15, .tRP =  30, .tR = 25000, .tWHR =  60, .tAR = 10, },
	{ .tCH = 10, .tCS = 35, .tWH = 15, .tWP =  25,
	  .tRH = 15, .tRP =  25, .tR = 25000, .tWHR =  60, .tAR = 10, },
};

322
static struct pxa3xx_nand_flash builtin_flash_types[] = {
323 324 325 326 327 328 329 330
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
331 332
};

333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388
/*
 * Report where the ECC bytes of OOB @section live.  Each chunk
 * contributes spare_size bytes of spare followed by ecc_size bytes
 * of ECC, so sections are laid out with a fixed stride.
 */
static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int num_chunks = mtd->writesize / info->chunk_size;
	unsigned int stride = info->ecc_size + info->spare_size;

	if (section >= num_chunks)
		return -ERANGE;

	/* ECC bytes follow the spare bytes within every chunk */
	oobregion->offset = section * stride + info->spare_size;
	oobregion->length = info->ecc_size;

	return 0;
}

/*
 * Report the free (spare) OOB bytes of @section.  The first section
 * reserves the bytes the bootrom uses as bad-block markers: bytes
 * 0 & 5 for the 4KB-page/2KB-chunk layout, bytes 0 & 1 otherwise.
 */
static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int num_chunks = mtd->writesize / info->chunk_size;
	unsigned int reserved = 0;

	if (section >= num_chunks)
		return -ERANGE;

	if (!info->spare_size)
		return 0;

	if (!section) {
		/*
		 * Bootrom looks in bytes 0 & 5 for bad blocks for the
		 * 4KB page / 4bit BCH combination.
		 */
		if (mtd->writesize == 4096 && info->chunk_size == 2048)
			reserved = 6;
		else
			reserved = 2;
	}

	oobregion->offset = section * (info->ecc_size + info->spare_size) +
			    reserved;
	oobregion->length = info->spare_size - reserved;

	return 0;
}

/* OOB layout callbacks registered with the MTD core */
static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
	.ecc = pxa3xx_ooblayout_ecc,
	.free = pxa3xx_ooblayout_free,
};

389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411
/* Signatures used to locate the main and mirror on-flash bad block tables */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main bad-block table descriptor (stored near the end of the chip) */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror copy of the bad-block table, same layout as the main one */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

E
eric miao 已提交
412 413 414 415 416 417 418 419 420 421 422 423
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
A
Axel Lin 已提交
424
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
E
eric miao 已提交
425

426
static const struct of_device_id pxa3xx_nand_dt_ids[] = {
427 428 429 430
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
431 432 433 434
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
435 436 437 438
	{
		.compatible = "marvell,armada-8k-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K,
	},
439 440 441 442 443 444 445 446 447 448 449 450 451 452
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

/*
 * Determine which controller variant is being driven from the
 * matched device-tree entry; default to the PXA flavour when the
 * device was not probed through DT.
 */
static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
	return match ? (enum pxa3xx_nand_variant)match->data
		     : PXA3XX_NAND_VARIANT_PXA;
}

453
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
454
				   const struct pxa3xx_nand_timing *t)
E
eric miao 已提交
455
{
456
	struct pxa3xx_nand_info *info = host->info_data;
E
eric miao 已提交
457 458 459 460 461 462 463 464 465 466 467 468 469 470
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

471 472
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
E
eric miao 已提交
473 474 475 476
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
					   unsigned int *flash_width,
					   unsigned int *dfc_width)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
523
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
524 525 526 527
	int i, id, ntypes;

	ntypes = ARRAY_SIZE(builtin_flash_types);

528
	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
529

530 531
	id = chip->read_byte(mtd);
	id |= chip->read_byte(mtd) << 0x8;
532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599

	for (i = 0; i < ntypes; i++) {
		f = &builtin_flash_types[i];

		if (f->chip_id == id)
			break;
	}

	if (i == ntypes) {
		dev_err(&info->pdev->dev, "Error: timings not found\n");
		return -EINVAL;
	}

	pxa3xx_nand_set_timing(host, f->timing);

	*flash_width = f->flash_width;
	*dfc_width = f->dfc_width;

	return 0;
}

/*
 * ONFI timing setup: pick the highest async timing mode advertised
 * in @mode (a bitmask), translate it to SDR timings and program the
 * controller.  Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;
	int best_mode = fls(mode) - 1;

	if (best_mode < 0)
		best_mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(best_mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}

/*
 * Configure bus widths and timings for the attached chip: prefer the
 * chip's advertised ONFI async timing modes, otherwise fall back to
 * the builtin READID-based table.  Returns 0 or a negative errno.
 */
static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
{
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned int flash_width = 0, dfc_width = 0;
	int mode, err;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		/* Non-ONFI chip: use the builtin table, which also
		 * reports the chip and controller bus widths. */
		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
						      &dfc_width);
		if (err)
			return err;

		if (flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		err = pxa3xx_nand_init_timings_onfi(host, mode);
		if (err)
			return err;
	}

	return 0;
}

L
Lei Wen 已提交
600 601 602 603 604 605 606 607 608 609
/**
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

610
	ndcr = info->reg_ndcr;
611

612
	if (info->use_ecc) {
613
		ndcr |= NDCR_ECC_EN;
614 615 616
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
617
		ndcr &= ~NDCR_ECC_EN;
618 619 620
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}
621 622 623 624 625 626

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

627 628 629 630 631
	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

L
Lei Wen 已提交
632 633 634 635
	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
636
	nand_writel(info, NDCR, 0);
L
Lei Wen 已提交
637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655
	nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
656 657 658
	if (info->dma_chan)
		dmaengine_terminate_all(info->dma_chan);

L
Lei Wen 已提交
659 660 661 662
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}

663 664
/*
 * Unmask the interrupts in @int_mask.  The NDCR interrupt bits are
 * mask bits, so clearing a bit enables the corresponding interrupt.
 */
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr = nand_readl(info, NDCR);

	nand_writel(info, NDCR, ndcr & ~int_mask);
}

/*
 * Mask the interrupts in @int_mask.  NDCR interrupt bits are mask
 * bits: setting a bit disables the corresponding interrupt.
 */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

680 681 682
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
683 684
		u32 val;
		int ret;
685 686 687 688 689 690 691 692 693 694

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
695
			ioread32_rep(info->mmio_base + NDDB, data, 8);
696

697 698 699 700 701 702
			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
703 704 705 706 707 708 709
			}

			data += 32;
			len -= 8;
		}
	}

710
	ioread32_rep(info->mmio_base + NDDB, data, len);
711 712
}

L
Lei Wen 已提交
713
static void handle_data_pio(struct pxa3xx_nand_info *info)
E
eric miao 已提交
714 715 716
{
	switch (info->state) {
	case STATE_PIO_WRITING:
717 718 719 720
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(info->step_chunk_size, 4));
721

722
		if (info->step_spare_size)
723 724
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
725
				DIV_ROUND_UP(info->step_spare_size, 4));
E
eric miao 已提交
726 727
		break;
	case STATE_PIO_READING:
728 729 730 731
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(info->step_chunk_size, 4));
732

733
		if (info->step_spare_size)
734 735
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
736
				   DIV_ROUND_UP(info->step_spare_size, 4));
E
eric miao 已提交
737 738
		break;
	default:
739
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
E
eric miao 已提交
740
				info->state);
L
Lei Wen 已提交
741
		BUG();
E
eric miao 已提交
742
	}
743 744

	/* Update buffer pointers for multi-page read/write */
745 746
	info->data_buff_pos += info->step_chunk_size;
	info->oob_buff_pos += info->step_spare_size;
E
eric miao 已提交
747 748
}

749
static void pxa3xx_nand_data_dma_irq(void *data)
E
eric miao 已提交
750
{
751 752 753
	struct pxa3xx_nand_info *info = data;
	struct dma_tx_state state;
	enum dma_status status;
E
eric miao 已提交
754

755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771
	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
	if (likely(status == DMA_COMPLETE)) {
		info->state = STATE_DMA_DONE;
	} else {
		dev_err(&info->pdev->dev, "DMA error on data channel\n");
		info->retcode = ERR_DMABUSERR;
	}
	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
	enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
	enum dma_transfer_direction direction;
	struct dma_async_tx_descriptor *tx;
E
eric miao 已提交
772

L
Lei Wen 已提交
773 774
	switch (info->state) {
	case STATE_DMA_WRITING:
775 776
		info->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
L
Lei Wen 已提交
777 778
		break;
	case STATE_DMA_READING:
779 780
		info->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
L
Lei Wen 已提交
781 782
		break;
	default:
783
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
L
Lei Wen 已提交
784 785
				info->state);
		BUG();
E
eric miao 已提交
786
	}
787 788 789
	info->sg.length = info->chunk_size;
	if (info->use_spare)
		info->sg.length += info->spare_size + info->ecc_size;
790 791 792 793 794 795 796
	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
		return;
E
eric miao 已提交
797
	}
798 799 800 801 802 803
	tx->callback = pxa3xx_nand_data_dma_irq;
	tx->callback_param = info;
	info->dma_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(info->dma_chan);
	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
		__func__, direction, info->dma_cookie, info->sg.length);
E
eric miao 已提交
804 805
}

806 807 808 809 810 811 812 813 814 815 816 817
/*
 * Threaded half of the IRQ handler: performs the (potentially slow)
 * PIO data transfer, then marks the command done and acks the
 * read/write data-request status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}

E
eric miao 已提交
818 819 820
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
821
	unsigned int status, is_completed = 0, is_ready = 0;
822
	unsigned int ready, cmd_done;
823
	irqreturn_t ret = IRQ_HANDLED;
824 825 826 827 828 829 830 831

	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}
E
eric miao 已提交
832 833 834

	status = nand_readl(info, NDSR);

835 836 837 838
	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
839 840
		if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		     info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
841 842 843 844 845 846 847 848 849 850 851 852 853 854
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
L
Lei Wen 已提交
855 856
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
E
eric miao 已提交
857
		if (info->use_dma) {
L
Lei Wen 已提交
858 859 860 861 862
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
E
eric miao 已提交
863
		} else {
L
Lei Wen 已提交
864 865
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
866 867
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
E
eric miao 已提交
868 869
		}
	}
870
	if (status & cmd_done) {
L
Lei Wen 已提交
871 872
		info->state = STATE_CMD_DONE;
		is_completed = 1;
E
eric miao 已提交
873
	}
874
	if (status & ready) {
L
Lei Wen 已提交
875
		info->state = STATE_READY;
876
		is_ready = 1;
877
	}
E
eric miao 已提交
878

879 880 881 882 883 884 885
	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

L
Lei Wen 已提交
886 887 888
	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;
889 890 891 892 893 894 895 896 897

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
L
Lei Wen 已提交
898 899 900
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);
901 902

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
903 904
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
905
			nand_writel(info, NDCB0, info->ndcb3);
E
eric miao 已提交
906 907
	}

L
Lei Wen 已提交
908 909
	if (is_completed)
		complete(&info->cmd_complete);
910 911
	if (is_ready)
		complete(&info->dev_ready);
L
Lei Wen 已提交
912
NORMAL_IRQ_EXIT:
913
	return ret;
E
eric miao 已提交
914 915 916 917 918 919 920 921 922 923
}

/* Return 1 when every byte of buf[0..len) is 0xff (erased), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}

924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943
/*
 * Fill NDCB1/NDCB2 with the column and page address.  Small-page
 * devices use a 1-byte column plus 3-byte row layout; large-page
 * devices use a 16-bit column with the row split over NDCB1 and
 * (for rows above 16 bits) NDCB2.
 */
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	if (page_size < PAGE_CHUNK_SIZE) {
		/* small page: one column address byte */
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | (column & 0xFF);
		info->ndcb2 = 0;
	} else {
		/* large page: two column address bytes */
		info->ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF);
		info->ndcb2 = (page_addr & 0xFF0000) ?
				(page_addr & 0xFF0000) >> 16 : 0;
	}
}

944
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
E
eric miao 已提交
945
{
946
	struct pxa3xx_nand_host *host = info->host[info->cs];
947
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
948

949
	/* reset data and oob column point to handle data */
950 951
	info->buf_start		= 0;
	info->buf_count		= 0;
952 953
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
954 955 956
	info->step_chunk_size   = 0;
	info->step_spare_size   = 0;
	info->cur_chunk         = 0;
957
	info->use_ecc		= 0;
958
	info->use_spare		= 1;
959
	info->retcode		= ERR_NONE;
960
	info->ecc_err_cnt	= 0;
961
	info->ndcb3		= 0;
962
	info->need_wait		= 0;
E
eric miao 已提交
963 964

	switch (command) {
965
	case NAND_CMD_READ0:
966
	case NAND_CMD_READOOB:
967 968
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
E
eric miao 已提交
969
		break;
970 971 972
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
973 974 975 976 977
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}
978 979 980 981 982 983 984 985 986 987 988 989 990

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

991 992 993
}

/*
 * prepare_set_command - build the NDCB0..NDCB3 command descriptor.
 * @info:	controller state
 * @command:	NAND_CMD_* opcode
 * @ext_cmd_type: extended command type (NDCB0_EXT_CMD_TYPE) used by the
 *		chunked (large-page) command sequences
 * @column:	column address
 * @page_addr:	page (row) address
 *
 * Translates a generic NAND command into the controller's command-buffer
 * registers, including chunk sizing for multi-chunk (splitted) reads and
 * writes on pages larger than the 2 KiB FIFO.
 *
 * Return: 1 if the command must actually be started on the controller,
 * 0 if nothing needs to be issued (e.g. SEQIN on small pages, blank
 * page program, ERASE2, unsupported command).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only latches the address; the actual program runs later */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB data lives right after the page data in the buffer */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* skip the program entirely for an all-0xFF buffer */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* small pages: single SEQIN + PAGEPROG transaction */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* already issued together with ERASE1 above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}

1182 1183
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
1184
{
1185
	struct nand_chip *chip = mtd_to_nand(mtd);
1186
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1187
	struct pxa3xx_nand_info *info = host->info_data;
1188
	int exec_cmd;
1189 1190 1191 1192 1193 1194

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
1195
	if (info->reg_ndcr & NDCR_DWIDTH_M)
1196 1197
		column /= 2;

1198 1199 1200 1201 1202 1203 1204
	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
1205 1206
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1207 1208
	}

1209 1210
	prepare_start_command(info, command);

1211
	info->state = STATE_PREPARED;
1212 1213
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

L
Lei Wen 已提交
1214 1215
	if (exec_cmd) {
		init_completion(&info->cmd_complete);
1216 1217
		init_completion(&info->dev_ready);
		info->need_wait = 1;
L
Lei Wen 已提交
1218 1219
		pxa3xx_nand_start(info);

1220 1221
		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
1222
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
L
Lei Wen 已提交
1223 1224 1225 1226
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
1227
	info->state = STATE_IDLE;
L
Lei Wen 已提交
1228 1229
}

1230 1231 1232
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
1233
{
1234
	struct nand_chip *chip = mtd_to_nand(mtd);
1235
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1236
	struct pxa3xx_nand_info *info = host->info_data;
1237
	int exec_cmd, ext_cmd_type;
1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
1264 1265 1266 1267 1268 1269
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
1270 1271
	default:
		ext_cmd_type = 0;
1272
		break;
1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
1289

1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

1301 1302
		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
1303 1304 1305 1306 1307 1308
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

1309 1310 1311 1312 1313 1314 1315 1316
		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0    &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

1317
		/* Check if the sequence is complete */
1318
		if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
1319 1320 1321 1322 1323 1324
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
1325
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
1326 1327
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1328 1329 1330 1331
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
1332
			if (info->cur_chunk == info->ntotalchunks - 1)
1333 1334 1335
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1336 1337 1338 1339 1340 1341

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
1342
			   info->cur_chunk == info->ntotalchunks) {
1343
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1344 1345 1346 1347 1348 1349
		}
	} while (1);

	info->state = STATE_IDLE;
}

1350
static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1351 1352
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
L
Lei Wen 已提交
1353 1354 1355
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1356 1357

	return 0;
L
Lei Wen 已提交
1358 1359 1360
}

/*
 * pxa3xx_nand_read_page_hwecc - ->ecc.read_page callback.
 *
 * Copies the already-transferred page and OOB data out of the driver
 * buffer and folds the hardware ECC result into the MTD ECC statistics.
 * An "uncorrectable" result on an all-0xFF page is ignored, since the
 * controller computes ECC 0 for blank pages which never matches the
 * (erased) ECC bytes in OOB.
 *
 * Return: maximum number of bitflips corrected in this page.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
1390
	struct nand_chip *chip = mtd_to_nand(mtd);
1391
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1392
	struct pxa3xx_nand_info *info = host->info_data;
E
eric miao 已提交
1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just send a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
1404
	struct nand_chip *chip = mtd_to_nand(mtd);
1405
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1406
	struct pxa3xx_nand_info *info = host->info_data;
E
eric miao 已提交
1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

/*
 * pxa3xx_nand_read_buf - ->read_buf callback.
 *
 * Copies up to @len bytes out of the driver buffer, clamped to the data
 * remaining from the last command.
 */
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
1430
	struct nand_chip *chip = mtd_to_nand(mtd);
1431
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1432
	struct pxa3xx_nand_info *info = host->info_data;
E
eric miao 已提交
1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

/*
 * Chip selection is encoded in each command descriptor (NDCB0_CSEL),
 * so there is nothing to do when the core switches chips.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
1446
	struct nand_chip *chip = mtd_to_nand(mtd);
1447
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1448
	struct pxa3xx_nand_info *info = host->info_data;
1449 1450 1451

	if (info->need_wait) {
		info->need_wait = 0;
1452 1453
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
1454 1455 1456 1457
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}
E
eric miao 已提交
1458 1459 1460 1461 1462

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
1463 1464
		else
			return NAND_STATUS_FAIL;
E
eric miao 已提交
1465 1466
	}

1467
	return NAND_STATUS_READY;
E
eric miao 已提交
1468 1469
}

1470
static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
E
eric miao 已提交
1471
{
1472
	struct pxa3xx_nand_host *host = info->host[info->cs];
E
eric miao 已提交
1473
	struct platform_device *pdev = info->pdev;
J
Jingoo Han 已提交
1474
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1475
	const struct nand_sdr_timings *timings;
E
eric miao 已提交
1476

1477 1478
	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
1479 1480 1481
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1482 1483
	info->reg_ndcr |= NDCR_SPARE_EN;

1484 1485 1486 1487 1488 1489
	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
1490 1491 1492 1493 1494 1495
	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
1496 1497
	struct nand_chip *chip = &host->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
1498

1499 1500 1501
	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
E
eric miao 已提交
1502 1503
}

1504
static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1505
{
1506 1507
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1508 1509
	uint32_t ndcr = nand_readl(info, NDCR);

1510
	/* Set an initial chunk size */
1511
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1512 1513
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1514
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1515 1516
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1517 1518
}

E
eric miao 已提交
1519 1520 1521
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
1522 1523 1524 1525
	struct dma_slave_config	config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret;
E
eric miao 已提交
1526

1527 1528 1529 1530
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	if (use_dma == 0)
E
eric miao 已提交
1531 1532
		return 0;

1533 1534 1535
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
E
eric miao 已提交
1536

1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548
	sg_init_one(&info->sg, info->data_buff, info->buf_size);
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = info->drcmr_dat;
	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
							  &param, &pdev->dev,
							  "data");
	if (!info->dma_chan) {
		dev_err(&pdev->dev, "unable to request data dma channel\n");
		return -ENODEV;
	}
E
eric miao 已提交
1549

1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	config.src_addr = info->mmio_phys + NDDB;
	config.dst_addr = info->mmio_phys + NDDB;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0) {
		dev_err(&info->pdev->dev,
			"dma channel configuration failed: %d\n",
			ret);
		return ret;
E
eric miao 已提交
1563 1564
	}

1565 1566 1567 1568 1569
	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
E
eric miao 已提交
1570 1571 1572
	return 0;
}

1573 1574
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
1575
	if (info->use_dma) {
1576 1577
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
1578
	}
1579 1580
	kfree(info->data_buff);
}
1581

1582
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1583
			struct mtd_info *mtd,
1584
			int strength, int ecc_stepsize, int page_size)
1585
{
1586 1587 1588
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

1589
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1590 1591
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
1592
		info->chunk_size = 2048;
1593 1594 1595 1596 1597 1598
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

1599
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1600 1601
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
1602
		info->chunk_size = 512;
1603 1604 1605 1606 1607 1608
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

1609 1610 1611 1612
	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
1613 1614
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
1615 1616
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
1617 1618 1619 1620 1621
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
1622
		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1623 1624
		ecc->strength = 16;

1625
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1626
		info->ecc_bch = 1;
1627 1628
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
1629 1630 1631 1632 1633
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
1634
		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1635 1636
		ecc->strength = 16;

1637 1638 1639 1640 1641
	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1642
		info->ecc_bch = 1;
1643 1644
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
1645 1646
		info->chunk_size = 1024;
		info->spare_size = 0;
1647 1648
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
1649 1650 1651
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
1652
		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
1653
		ecc->strength = 16;
1654 1655 1656 1657 1658
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
1659
	}
1660 1661 1662

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
1663 1664 1665
	return 0;
}

1666
static int pxa3xx_nand_scan(struct mtd_info *mtd)
E
eric miao 已提交
1667
{
1668
	struct nand_chip *chip = mtd_to_nand(mtd);
1669
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1670
	struct pxa3xx_nand_info *info = host->info_data;
1671
	struct platform_device *pdev = info->pdev;
J
Jingoo Han 已提交
1672
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1673
	int ret;
1674
	uint16_t ecc_strength, ecc_step;
1675

1676 1677 1678 1679 1680 1681
	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
1682 1683
	}

1684
	if (info->reg_ndcr & NDCR_DWIDTH_M)
1685 1686
		chip->options |= NAND_BUSWIDTH_16;

1687
	/* Device detection must be done with ECC disabled */
1688 1689
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
	    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
1690 1691
		nand_writel(info, NDECCCTRL, 0x0);

1692 1693 1694 1695 1696 1697
	if (pdata->flash_bbt)
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	chip->ecc.strength = pdata->ecc_strength;
	chip->ecc.size = pdata->ecc_step_size;

1698 1699 1700
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;
1701

1702 1703 1704 1705 1706 1707 1708 1709 1710
	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

1711
	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
1712 1713 1714 1715
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
1716
		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
1717 1718 1719 1720
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

1721 1722 1723 1724 1725 1726
	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
1727 1728
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1729 1730 1731 1732 1733 1734 1735 1736
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

1737 1738 1739
	ecc_strength = chip->ecc.strength;
	ecc_step = chip->ecc.size;
	if (!ecc_strength || !ecc_step) {
1740 1741 1742
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}
1743 1744 1745 1746 1747 1748 1749

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

1750
	ret = pxa_ecc_init(info, mtd, ecc_strength,
1751
			   ecc_step, mtd->writesize);
1752 1753
	if (ret)
		return ret;
1754

1755
	/* calculate addressing information */
1756 1757 1758 1759 1760
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

1761 1762 1763 1764 1765 1766 1767 1768
	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
1769
	info->oob_buff = info->data_buff + mtd->writesize;
1770

1771
	if ((mtd->size >> chip->page_shift) > 65536)
1772
		host->row_addr_cycles = 3;
1773
	else
1774
		host->row_addr_cycles = 2;
1775 1776 1777 1778

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

1779
	return nand_scan_tail(mtd);
E
eric miao 已提交
1780 1781
}

1782
static int alloc_nand_resource(struct platform_device *pdev)
E
eric miao 已提交
1783
{
1784
	struct device_node *np = pdev->dev.of_node;
1785
	struct pxa3xx_nand_platform_data *pdata;
E
eric miao 已提交
1786
	struct pxa3xx_nand_info *info;
1787
	struct pxa3xx_nand_host *host;
1788
	struct nand_chip *chip = NULL;
E
eric miao 已提交
1789 1790
	struct mtd_info *mtd;
	struct resource *r;
1791
	int ret, irq, cs;
E
eric miao 已提交
1792

J
Jingoo Han 已提交
1793
	pdata = dev_get_platdata(&pdev->dev);
1794 1795
	if (pdata->num_cs <= 0) {
		dev_err(&pdev->dev, "invalid number of chip selects\n");
1796
		return -ENODEV;
1797 1798
	}

1799 1800 1801
	info = devm_kzalloc(&pdev->dev,
			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
			    GFP_KERNEL);
1802
	if (!info)
1803
		return -ENOMEM;
E
eric miao 已提交
1804 1805

	info->pdev = pdev;
1806
	info->variant = pxa3xx_nand_get_variant(pdev);
1807
	for (cs = 0; cs < pdata->num_cs; cs++) {
1808 1809
		host = (void *)&info[1] + sizeof(*host) * cs;
		chip = &host->chip;
1810
		nand_set_controller_data(chip, host);
1811
		mtd = nand_to_mtd(chip);
1812 1813 1814
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
1815
		mtd->dev.parent = &pdev->dev;
1816 1817
		/* FIXME: all chips use the same device tree partitions */
		nand_set_flash_node(chip, np);
1818

1819
		nand_set_controller_data(chip, host);
1820 1821 1822 1823 1824 1825 1826 1827 1828
		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
		chip->controller        = &info->controller;
		chip->waitfunc		= pxa3xx_nand_waitfunc;
		chip->select_chip	= pxa3xx_nand_select_chip;
		chip->read_word		= pxa3xx_nand_read_word;
		chip->read_byte		= pxa3xx_nand_read_byte;
		chip->read_buf		= pxa3xx_nand_read_buf;
		chip->write_buf		= pxa3xx_nand_write_buf;
1829
		chip->options		|= NAND_NO_SUBPAGE_WRITE;
1830
		chip->cmdfunc		= nand_cmdfunc;
1831 1832
		chip->onfi_set_features	= nand_onfi_get_set_features_notsupp;
		chip->onfi_get_features	= nand_onfi_get_set_features_notsupp;
1833
	}
1834

1835
	nand_hw_control_init(chip->controller);
1836
	info->clk = devm_clk_get(&pdev->dev, NULL);
E
eric miao 已提交
1837
	if (IS_ERR(info->clk)) {
1838 1839 1840
		ret = PTR_ERR(info->clk);
		dev_err(&pdev->dev, "failed to get nand clock: %d\n", ret);
		return ret;
E
eric miao 已提交
1841
	}
1842 1843 1844
	ret = clk_prepare_enable(info->clk);
	if (ret < 0)
		return ret;
E
eric miao 已提交
1845

1846
	if (!np && use_dma) {
1847 1848 1849 1850 1851 1852
		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
		if (r == NULL) {
			dev_err(&pdev->dev,
				"no resource defined for data DMA\n");
			ret = -ENXIO;
			goto fail_disable_clk;
1853
		}
1854
		info->drcmr_dat = r->start;
E
eric miao 已提交
1855 1856 1857 1858 1859 1860
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ resource defined\n");
		ret = -ENXIO;
1861
		goto fail_disable_clk;
E
eric miao 已提交
1862 1863 1864
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1865 1866 1867
	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(info->mmio_base)) {
		ret = PTR_ERR(info->mmio_base);
1868
		dev_err(&pdev->dev, "failed to map register space: %d\n", ret);
1869
		goto fail_disable_clk;
E
eric miao 已提交
1870
	}
1871
	info->mmio_phys = r->start;
E
eric miao 已提交
1872

1873 1874 1875 1876 1877
	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
1878
		goto fail_disable_clk;
1879
	}
E
eric miao 已提交
1880

1881 1882 1883
	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

1884 1885 1886
	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
				   pdev->name, info);
E
eric miao 已提交
1887
	if (ret < 0) {
1888
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", ret);
E
eric miao 已提交
1889 1890 1891
		goto fail_free_buf;
	}

1892
	platform_set_drvdata(pdev, info);
E
eric miao 已提交
1893

1894
	return 0;
E
eric miao 已提交
1895 1896

fail_free_buf:
1897
	free_irq(irq, info);
1898
	kfree(info->data_buff);
1899
fail_disable_clk:
1900
	clk_disable_unprepare(info->clk);
1901
	return ret;
E
eric miao 已提交
1902 1903 1904 1905
}

static int pxa3xx_nand_remove(struct platform_device *pdev)
{
1906
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1907 1908
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;
E
eric miao 已提交
1909

1910 1911 1912
	if (!info)
		return 0;

J
Jingoo Han 已提交
1913
	pdata = dev_get_platdata(&pdev->dev);
E
eric miao 已提交
1914

1915 1916 1917
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
1918
	pxa3xx_nand_free_buff(info);
1919

1920 1921 1922 1923 1924 1925 1926 1927 1928 1929
	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
1930
	clk_disable_unprepare(info->clk);
1931

1932
	for (cs = 0; cs < pdata->num_cs; cs++)
1933
		nand_release(nand_to_mtd(&info->host[cs]->chip));
E
eric miao 已提交
1934 1935 1936
	return 0;
}

1937 1938 1939 1940 1941 1942 1943 1944 1945 1946
static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);

	if (!of_id)
		return 0;

1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964
	/*
	 * Some SoCs like A7k/A8k need to enable manually the NAND
	 * controller to avoid being bootloader dependent. This is done
	 * through the use of a single bit in the System Functions registers.
	 */
	if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
		struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
			pdev->dev.of_node, "marvell,system-controller");
		u32 reg;

		if (IS_ERR(sysctrl_base))
			return PTR_ERR(sysctrl_base);

		regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
		reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
		regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
	}

1965 1966 1967 1968 1969 1970 1971 1972 1973
	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
		pdata->enable_arbiter = 1;
	if (of_get_property(np, "marvell,nand-keep-config", NULL))
		pdata->keep_config = 1;
	of_property_read_u32(np, "num-cs", &pdata->num_cs);
1974

1975 1976 1977 1978 1979
	pdev->dev.platform_data = pdata;

	return 0;
}

1980 1981 1982 1983
/*
 * Probe entry point: parse optional DT platform data, allocate controller
 * resources, then scan and register one MTD device per configured chip
 * select.  The probe succeeds if at least one chip select registers; if
 * none does, all resources are torn down and -ENODEV is returned.
 */
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_info *info;
	int ret, cs, probe_success, dma_available;

	/* PDMA is only available on ARM PXA/MMP platforms */
	dma_available = IS_ENABLED(CONFIG_ARM) &&
		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
	if (use_dma && !dma_available) {
		use_dma = 0;
		dev_warn(&pdev->dev,
			 "This platform can't do DMA on this device\n");
	}

	ret = pxa3xx_nand_probe_dt(pdev);
	if (ret)
		return ret;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -ENODEV;
	}

	ret = alloc_nand_resource(pdev);
	if (ret)
		return ret;

	/* alloc_nand_resource() stored the info struct as drvdata */
	info = platform_get_drvdata(pdev);
	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in 'mtdparts' kernel
		 * parameter. This name cannot be changed or otherwise
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			/* A missing chip on one CS is not fatal; try the next */
			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
				cs);
			continue;
		}

		ret = mtd_device_register(mtd, pdata->parts[cs],
					  pdata->nr_parts[cs]);
		if (!ret)
			probe_success = 1;
	}

	if (!probe_success) {
		/* No chip select came up: undo everything and bail out */
		pxa3xx_nand_remove(pdev);
		return -ENODEV;
	}

	return 0;
}

#ifdef CONFIG_PM
static int pxa3xx_nand_suspend(struct device *dev)
E
eric miao 已提交
2043
{
2044
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
E
eric miao 已提交
2045

L
Lei Wen 已提交
2046
	if (info->state) {
2047
		dev_err(dev, "driver busy, state = %d\n", info->state);
E
eric miao 已提交
2048 2049 2050
		return -EAGAIN;
	}

2051
	clk_disable(info->clk);
E
eric miao 已提交
2052 2053 2054
	return 0;
}

/*
 * System resume hook: ungate the controller clock and bring the driver
 * state machine back to a clean, idle condition before any MTD request
 * is serviced.
 */
static int pxa3xx_nand_resume(struct device *dev)
{
	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(info->clk);
	if (ret < 0)
		return ret;

	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value,
	 * then the driver would reset the timing according
	 * to current chip select at the beginning of cmdfunc
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);

	return 0;
}
#else
#define pxa3xx_nand_suspend	NULL
#define pxa3xx_nand_resume	NULL
#endif

/* Power-management callbacks, only wired up when CONFIG_PM is set */
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

/* Platform driver glue: matches "pxa3xx-nand" or the DT compatible table */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
		.pm	= &pxa3xx_nand_pm_ops,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
};

/* Registers the driver at module init and unregisters it at module exit */
module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");