/* Common Flash Interface structures 
 * See http://support.intel.com/design/flash/technote/index.htm
 * $Id: cfi.h,v 1.54 2005/06/06 23:04:36 tpoynor Exp $
 */

#ifndef __MTD_CFI_H__
#define __MTD_CFI_H__

#include <linux/config.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi_endian.h>

#ifdef CONFIG_MTD_CFI_I1
#define cfi_interleave(cfi) 1
#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
#else
#define cfi_interleave_is_1(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I2
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 2
# endif
#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
#else
#define cfi_interleave_is_2(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I4
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 4
# endif
#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
#else
#define cfi_interleave_is_4(cfi) (0)
#endif

#ifdef CONFIG_MTD_CFI_I8
# ifdef cfi_interleave
#  undef cfi_interleave
#  define cfi_interleave(cfi) ((cfi)->interleave)
# else
#  define cfi_interleave(cfi) 8
# endif
#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
#else
#define cfi_interleave_is_8(cfi) (0)
#endif
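
/*
 * How the interleave macros resolve (explanatory note): if exactly one
 * CONFIG_MTD_CFI_Ix option is enabled, cfi_interleave(cfi) expands to a
 * compile-time constant and the other geometry paths can be optimised
 * away; if two or more are enabled, the later definition replaces it
 * and the value is read from the runtime field instead.  E.g.:
 *
 *	CONFIG_MTD_CFI_I2 only:       cfi_interleave(cfi) -> 2
 *	CONFIG_MTD_CFI_I2 and _I4:    cfi_interleave(cfi) -> (cfi)->interleave
 */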

static inline int cfi_interleave_supported(int i)
{
	switch (i) {
#ifdef CONFIG_MTD_CFI_I1
	case 1:
#endif
#ifdef CONFIG_MTD_CFI_I2
	case 2:
#endif
#ifdef CONFIG_MTD_CFI_I4
	case 4:
#endif
#ifdef CONFIG_MTD_CFI_I8
	case 8:
#endif
		return 1;

	default:
		return 0;
	}
}


/* NB: these values must represent the number of bytes needed to meet the
 *     device type (x8, x16, x32).  E.g. a 32-bit device is 4 bytes wide
 *     (4 x 8 bits).  These numbers are used in calculations.
 */
#define CFI_DEVICETYPE_X8  (8 / 8)
#define CFI_DEVICETYPE_X16 (16 / 8)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)

/* NB: We keep these structures in memory in HOST byteorder, except
 * where individually noted.
 */

/* Basic Query Structure */
struct cfi_ident {
	uint8_t  qry[3];
	uint16_t P_ID;
	uint16_t P_ADR;
	uint16_t A_ID;
	uint16_t A_ADR;
	uint8_t  VccMin;
	uint8_t  VccMax;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  WordWriteTimeoutTyp;
	uint8_t  BufWriteTimeoutTyp;
	uint8_t  BlockEraseTimeoutTyp;
	uint8_t  ChipEraseTimeoutTyp;
	uint8_t  WordWriteTimeoutMax;
	uint8_t  BufWriteTimeoutMax;
	uint8_t  BlockEraseTimeoutMax;
	uint8_t  ChipEraseTimeoutMax;
	uint8_t  DevSize;
	uint16_t InterfaceDesc;
	uint16_t MaxBufWriteSize;
	uint8_t  NumEraseRegions;
	uint32_t EraseRegionInfo[0]; /* Not host ordered */
} __attribute__((packed));

/* Extended Query Structure for both PRI and ALT */

struct cfi_extquery {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
} __attribute__((packed));

/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */

struct cfi_pri_intelext {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
				    block follows - FIXME - not currently supported */
	uint8_t  SuspendCmdSupport;
	uint16_t BlkStatusRegMask;
	uint8_t  VccOptimal;
	uint8_t  VppOptimal;
	uint8_t  NumProtectionFields;
	uint16_t ProtRegAddr;
	uint8_t  FactProtRegSize;
	uint8_t  UserProtRegSize;
	uint8_t  extra[0];
} __attribute__((packed));

struct cfi_intelext_otpinfo {
	uint32_t ProtRegAddr;
	uint16_t FactGroups;
	uint8_t  FactProtRegSize;
	uint16_t UserGroups;
	uint8_t  UserProtRegSize;
} __attribute__((packed));

struct cfi_intelext_blockinfo {
	uint16_t NumIdentBlocks;
	uint16_t BlockSize;
	uint16_t MinBlockEraseCycles;
	uint8_t  BitsPerCell;
	uint8_t  BlockCap;
} __attribute__((packed));

struct cfi_intelext_regioninfo {
	uint16_t NumIdentPartitions;
	uint8_t  NumOpAllowed;
	uint8_t  NumOpAllowedSimProgMode;
	uint8_t  NumOpAllowedSimEraMode;
	uint8_t  NumBlockTypes;
	struct cfi_intelext_blockinfo BlockTypes[1];
} __attribute__((packed));

/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */

struct cfi_pri_amdstd {
	uint8_t  pri[3];
	uint8_t  MajorVersion;
	uint8_t  MinorVersion;
	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
	uint8_t  EraseSuspend;
	uint8_t  BlkProt;
	uint8_t  TmpBlkUnprotect;
	uint8_t  BlkProtUnprot;
	uint8_t  SimultaneousOps;
	uint8_t  BurstMode;
	uint8_t  PageMode;
	uint8_t  VppMin;
	uint8_t  VppMax;
	uint8_t  TopBottom;
} __attribute__((packed));

struct cfi_pri_query {
	uint8_t  NumFields;
	uint32_t ProtField[1]; /* Not host ordered */
} __attribute__((packed));

struct cfi_bri_query {
	uint8_t  PageModeReadCap;
	uint8_t  NumFields;
	uint32_t ConfField[1]; /* Not host ordered */
} __attribute__((packed));

#define P_ID_NONE               0x0000
#define P_ID_INTEL_EXT          0x0001
#define P_ID_AMD_STD            0x0002
#define P_ID_INTEL_STD          0x0003
#define P_ID_AMD_EXT            0x0004
#define P_ID_WINBOND            0x0006
#define P_ID_ST_ADV             0x0020
#define P_ID_MITSUBISHI_STD     0x0100
#define P_ID_MITSUBISHI_EXT     0x0101
#define P_ID_SST_PAGE           0x0102
#define P_ID_INTEL_PERFORMANCE  0x0200
#define P_ID_INTEL_DATA         0x0210
#define P_ID_RESERVED           0xffff


#define CFI_MODE_CFI	1
#define CFI_MODE_JEDEC	0

struct cfi_private {
	uint16_t cmdset;
	void *cmdset_priv;
	int interleave;
	int device_type;
	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
	int addr_unlock1;
	int addr_unlock2;
	struct mtd_info *(*cmdset_setup)(struct map_info *);
	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
				  must be of the same type. */
	int mfr, id;
	int numchips;
	unsigned long chipshift; /* Because they're of the same type */
	const char *im_name;	 /* inter_module name for cmdset_setup */
	struct flchip chips[0];  /* per-chip data structure for each chip */
};

/*
 * Returns the command address according to the given geometry.
 */
static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
{
	return (cmd_ofs * type) * interleave;
}
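
/*
 * Worked example (illustrative only): for two interleaved x16 chips,
 * interleave == 2 and type == CFI_DEVICETYPE_X16 == 2, so the common
 * unlock offset 0x555 lands at
 *
 *	cfi_build_cmd_addr(0x555, 2, CFI_DEVICETYPE_X16)
 *		== (0x555 * 2) * 2 == 0x1554
 *
 * bytes from the chip's base address.
 */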

/*
 * Transforms the CFI command for the given geometry (bus width & interleave).
 * It looks too long to be inline, but in the common case it should almost all
 * get optimised away. 
 */
static inline map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi)
{
	map_word val = { {0} };
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onecmd;
	int i;

	/* We do it this way to give the compiler a fighting chance 
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}
	
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	/* First, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	default: BUG();
	case 1:
		onecmd = cmd;
		break;
	case 2:
		onecmd = cpu_to_cfi16(cmd);
		break;
	case 4:
		onecmd = cpu_to_cfi32(cmd);
		break;
	}

	/* Now replicate it across the size of an unsigned long, or 
	   just to the bus width as appropriate */
	switch (chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		onecmd |= (onecmd << (chip_mode * 32));
#endif
	case 4:
		onecmd |= (onecmd << (chip_mode * 16));
	case 2:
		onecmd |= (onecmd << (chip_mode * 8));
	case 1:
		;
	}

	/* And finally, for the multi-word case, replicate it 
	   in all words in the structure */
	for (i=0; i < words_per_bus; i++) {
		val.x[i] = onecmd;
	}

	return val;
}
#define CMD(x)  cfi_build_cmd((x), map, cfi)
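
/*
 * Illustrative example (assuming a 16-bit bus populated by two
 * interleaved x8 chips): chip_mode is 1 and chips_per_word is 2, so
 * CMD(0xF0) produces a map_word whose x[0] is 0xF0F0 -- the same
 * command byte presented to both chips in a single bus write.  For a
 * single x16 chip on the same bus, chip_mode is 2 and the result is
 * simply cpu_to_cfi16(0xF0).
 */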

static inline unsigned char cfi_merge_status(map_word val, struct map_info *map, 
					   struct cfi_private *cfi)
{
	int wordwidth, words_per_bus, chip_mode, chips_per_word;
	unsigned long onestat, res = 0;
	int i;

	/* We do it this way to give the compiler a fighting chance 
	   of optimising away all the crap for 'bankwidth' larger than
	   an unsigned long, in the common case where that support is
	   disabled */
	if (map_bankwidth_is_large(map)) {
		wordwidth = sizeof(unsigned long);
		words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1
	} else {
		wordwidth = map_bankwidth(map);
		words_per_bus = 1;
	}
	
	chip_mode = map_bankwidth(map) / cfi_interleave(cfi);
	chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map);

	onestat = val.x[0];
	/* Or all status words together */
	for (i=1; i < words_per_bus; i++) {
		onestat |= val.x[i];
	}

	res = onestat;
	switch(chips_per_word) {
	default: BUG();
#if BITS_PER_LONG >= 64
	case 8:
		res |= (onestat >> (chip_mode * 32));
#endif
	case 4:
		res |= (onestat >> (chip_mode * 16));
	case 2:
		res |= (onestat >> (chip_mode * 8));
	case 1:
		;
	}

	/* Last, determine what the bit-pattern should be for a single
	   device, according to chip mode and endianness... */
	switch (chip_mode) {
	case 1:
		break;
	case 2:
		res = cfi16_to_cpu(res);
		break;
	case 4:
		res = cfi32_to_cpu(res);
		break;
	default: BUG();
	}
	return res;
}

#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
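
/*
 * Illustrative example (same two-x8-chip geometry as above): if a
 * status read returns val.x[0] == 0x2280 -- one chip reporting 0x22,
 * the other 0x80 -- the lanes are ORed together and cfi_merge_status()
 * returns 0xa2, so a status or error bit raised by either chip shows
 * up in the merged byte.
 */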


/*
 * Sends a CFI command to a bank of flash for the given geometry.
 *
 * Returns the offset in flash where the command was written.
 * If prev_val is non-null, it will be set to the value that was at the
 * command address before the command was written.
 */
static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
				struct map_info *map, struct cfi_private *cfi,
				int type, map_word *prev_val)
{
	map_word val;
	uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);

	val = cfi_build_cmd(cmd, map, cfi);

	if (prev_val)
		*prev_val = map_read(map, addr);

	map_write(map, val, addr);

	return addr - base;
}
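
/*
 * Typical usage (a sketch of how the probe and cmdset code drive this;
 * see cfi_probe.c and the cmdset drivers for the authoritative
 * sequences): entering CFI query mode is a single 0x98 write to query
 * offset 0x55, e.g.
 *
 *	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
 *
 * and the AMD/Fujitsu unlock cycles write 0xAA to addr_unlock1 and
 * 0x55 to addr_unlock2 in the same way.
 */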

static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
{
	map_word val = map_read(map, addr);

	if (map_bankwidth_is_1(map)) {
		return val.x[0];
	} else if (map_bankwidth_is_2(map)) {
		return cfi16_to_cpu(val.x[0]);
	} else {
		/* No point in a 64-bit byteswap since that would just be
		   swapping the responses from different chips, and we are
		   only interested in one chip (a representative sample) */
		return cfi32_to_cpu(val.x[0]);
	}
}
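
/*
 * Example (a sketch of the probe-time signature check, assuming the
 * device is already in query mode): the 'Q', 'R', 'Y' identification
 * bytes live at query offsets 0x10-0x12, scaled for the bus geometry:
 *
 *	if (cfi_read_query(map, base + cfi_build_cmd_addr(0x10,
 *			cfi_interleave(cfi), cfi->device_type)) == 'Q')
 *		...
 */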

static inline void cfi_udelay(int us)
{
	if (us >= 1000) {
		msleep((us+999)/1000);
	} else {
		udelay(us);
		cond_resched();
	}
}

struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
			     const char* name);
struct cfi_fixup {
	uint16_t mfr;
	uint16_t id;
	void (*fixup)(struct mtd_info *mtd, void* param);
	void* param;
};

#define CFI_MFR_ANY 0xffff
#define CFI_ID_ANY  0xffff

#define CFI_MFR_AMD 0x0001
#define CFI_MFR_ST  0x0020 	/* STMicroelectronics */

void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
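
/*
 * cfi_fixup() walks a table of fixups and applies each entry whose
 * (mfr, id) pair matches the probed chip, with CFI_MFR_ANY/CFI_ID_ANY
 * acting as wildcards; the table is terminated by an entry with a NULL
 * fixup pointer.  A minimal sketch (fixup_my_chip is hypothetical,
 * named only for illustration):
 *
 *	static struct cfi_fixup fixup_table[] = {
 *		{ CFI_MFR_AMD, CFI_ID_ANY, fixup_my_chip, NULL },
 *		{ 0, 0, NULL, NULL }
 *	};
 *
 *	cfi_fixup(mtd, fixup_table);
 */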

typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
			      unsigned long adr, int len, void *thunk);

int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
	loff_t ofs, size_t len, void *thunk);
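
/*
 * cfi_varsize_frob() iterates over the erase blocks covering
 * [ofs, ofs+len) and calls the callback once per block with that
 * block's chip-relative address and size; the cmdset drivers use it to
 * implement erase, lock and unlock.  A hypothetical callback
 * (do_log_oneblock is illustrative only):
 *
 *	static int do_log_oneblock(struct map_info *map, struct flchip *chip,
 *				   unsigned long adr, int len, void *thunk)
 *	{
 *		printk(KERN_DEBUG "block at 0x%lx, %d bytes\n", adr, len);
 *		return 0;
 *	}
 */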


#endif /* __MTD_CFI_H__ */