/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/of.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/**
 * struct mtd_part - our partition node structure
 *
 * @mtd: struct holding partition details
 * @parent: parent mtd - flash device or another partition
 * @offset: partition offset relative to the *flash device*
 */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *parent;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure.
 */
static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
{
	return container_of(mtd, struct mtd_part, mtd);
}

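/*
 * Compute a partition's absolute offset on the underlying flash device by
 * walking up the chain of parent partitions and summing their offsets.
 */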
static u64 part_absolute_offset(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);

	if (!mtd_is_partition(mtd))
		return 0;

	return part_absolute_offset(part->parent) + part->offset;
}

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

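	/*
	 * Snapshot the parent's ECC statistics so that any failures or
	 * bitflips produced by this read can be credited to this partition's
	 * own counters below.
	 */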
	stats = part->parent->ecc_stats;
	res = part->parent->_read(part->parent, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_point(part->parent, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_unpoint(part->parent, from + part->offset, len);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_user_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_user_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_fact_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write(part->parent, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_panic_write(part->parent, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_write_oob(part->parent, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write_user_prot_reg(part->parent, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock_user_prot_reg(part->parent, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_writev(part->parent, vecs, count,
				     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->parent->_erase(part->parent, instr);
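	/*
	 * Addresses reported by the parent are parent-relative; translate the
	 * failure address (if any) and the erase address back into this
	 * partition's address space before returning.
	 */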
	if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
		instr->fail_addr -= part->offset;
	instr->addr -= part->offset;

	return ret;
}

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock(part->parent, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_unlock(part->parent, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_sync(part->parent);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_suspend(part->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_resume(part->parent);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isreserved(part->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isbad(part->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	ofs += part->offset;
	res = part->parent->_block_markbad(part->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static int part_get_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_device(part->parent);
}

static void part_put_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_put_device(part->parent);
}

static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_ecc(part->parent, section, oobregion);
}

static int part_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_free(part->parent, section, oobregion);
}

static const struct mtd_ooblayout_ops part_ooblayout_ops = {
	.ecc = part_ooblayout_ecc,
	.free = part_ooblayout_free,
};

static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_max_bad_blocks(part->parent,
					     ofs + part->offset, len);
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

static struct mtd_part *allocate_partition(struct mtd_info *parent,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
							    parent->erasesize;
	struct mtd_part *slave;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = parent->type;
	slave->mtd.flags = parent->orig_flags & ~part->mask_flags;
	slave->mtd.orig_flags = slave->mtd.flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = parent->writesize;
	slave->mtd.writebufsize = parent->writebufsize;
	slave->mtd.oobsize = parent->oobsize;
	slave->mtd.oobavail = parent->oobavail;
	slave->mtd.subpage_sft = parent->subpage_sft;
	slave->mtd.pairing = parent->pairing;

	slave->mtd.name = name;
	slave->mtd.owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
				&parent->dev :
				parent->dev.parent;
	slave->mtd.dev.of_node = part->of_node;

	if (parent->_read)
		slave->mtd._read = part_read;
	if (parent->_write)
		slave->mtd._write = part_write;

	if (parent->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (parent->_point && parent->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (parent->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (parent->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (parent->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (parent->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (parent->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (parent->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (parent->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (parent->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (parent->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !parent->dev.class && parent->_suspend &&
	    parent->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (parent->_writev)
		slave->mtd._writev = part_writev;
	if (parent->_lock)
		slave->mtd._lock = part_lock;
	if (parent->_unlock)
		slave->mtd._unlock = part_unlock;
	if (parent->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (parent->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (parent->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (parent->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	if (parent->_max_bad_blocks)
		slave->mtd._max_bad_blocks = part_max_bad_blocks;

	if (parent->_get_device)
		slave->mtd._get_device = part_get_device;
	if (parent->_put_device)
		slave->mtd._put_device = part_put_device;

	slave->mtd._erase = part_erase;
	slave->parent = parent;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
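		/*
		 * MTDPART_OFS_NXTBLK: place the partition at the current
		 * offset, rounded up to the next wr_alignment (erase/write
		 * block) boundary if it is not already aligned.
		 */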
		tmp = cur_offset;
		slave->offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			slave->offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (parent->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = parent->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = parent->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= parent->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > parent->size) {
		slave->mtd.size = parent->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, (unsigned long long)slave->mtd.size);
	}
	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = parent->erasesize;
	}

	/*
	 * Slave erasesize might differ from the master one if the master
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(slave->mtd.flags & MTD_NO_ERASE))
		wr_alignment = slave->mtd.erasesize;

	tmp = part_absolute_offset(parent) + slave->offset;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	tmp = part_absolute_offset(parent) + slave->mtd.size;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
			part->name);
	}

	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
	slave->mtd.ecc_step_size = parent->ecc_step_size;
	slave->mtd.ecc_strength = parent->ecc_strength;
	slave->mtd.bitflip_threshold = parent->bitflip_threshold;

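	/*
	 * Walk the new partition and count the blocks the parent already
	 * reports as bad or as reserved for the bad block table, so the
	 * partition's ecc_stats start out accurate.
	 */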
	if (parent->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(parent, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(parent, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = mtd_to_part(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

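/*
 * Add a single partition to @parent at run time. The caller must pass an
 * explicit offset; the MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK placeholders
 * are rejected below.
 */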
int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent->size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	new = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	mtd_add_partition_attrs(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @priv: internal MTD struct for partition to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_part *priv)
{
	struct mtd_part *child, *next;
	int err;

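	/* Delete any sub-partitions (children) of this partition first. */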
	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
		if (child->parent == &priv->mtd) {
			err = __mtd_del_partition(child);
			if (err)
				return err;
		}
	}

	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);

	err = del_mtd_device(&priv->mtd);
	if (err)
		return err;

	list_del(&priv->list);
	free_partition(priv);

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object.
 */
int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->parent == mtd) {
			ret = __mtd_del_partition(slave);
			if (ret < 0)
				err = ret;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->parent == mtd) &&
		    (slave->mtd.index == partno)) {
			ret = __mtd_del_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave)) {
			del_mtd_partitions(master);
			return PTR_ERR(slave);
		}

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);
		mtd_add_partition_attrs(slave);
		/* Look for subpartitions */
		parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}
766 767 768 769 770 771 772 773 774 775
/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/* Check DT only when looking for subpartitions. */
static const char * const default_subpartition_types[] = {
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * mtd_part_get_compatible_parser - find MTD parser by a compatible string
 *
 * @compat: compatible string describing partitions in a device tree
 *
 * MTD parsers can specify supported partitions by providing a table of
 * compatibility strings. This function finds a parser that advertises support
 * for a passed value of "compatible".
 */
static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list) {
		const struct of_device_id *matches;

		matches = p->of_match_table;
		if (!matches)
			continue;

		for (; matches->compatible[0]; matches++) {
			if (!strcmp(matches->compatible, compat) &&
			    try_module_get(p->owner)) {
				ret = p;
				break;
			}
		}

		if (ret)
			break;
	}

	spin_unlock(&part_parser_lock);

	return ret;
}

static int mtd_part_of_parse(struct mtd_info *master,
			     struct mtd_partitions *pparts)
{
	struct mtd_part_parser *parser;
	struct device_node *np;
	struct property *prop;
	const char *compat;
	const char *fixed = "fixed-partitions";
	int ret, err = 0;

	np = mtd_get_of_node(master);
	if (mtd_is_partition(master))
		of_node_get(np);
	else
		np = of_get_child_by_name(np, "partitions");

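	/*
	 * Try every registered parser whose of_match_table advertises one of
	 * the "compatible" strings of the partitions node.
	 */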
	of_property_for_each_string(np, "compatible", prop, compat) {
		parser = mtd_part_get_compatible_parser(compat);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0) {
			of_node_put(np);
			return ret;
		}
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}
	of_node_put(np);

	/*
	 * For backward compatibility we have to try the "fixed-partitions"
	 * parser. It supports the old DT format with partitions specified as
	 * direct subnodes of a flash device DT node, without any compatibility
	 * string we could match.
	 */
	parser = mtd_part_parser_get(fixed);
	if (!parser && !request_module("%s", fixed))
		parser = mtd_part_parser_get(fixed);
	if (parser) {
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0)
			return ret;
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}

	return err;
}

/**
 * parse_mtd_partitions - parse and register MTD partitions
 *
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @data: MTD partition parser-specific data
 *
 * This function tries to find & register partitions on MTD device @master. It
 * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o number of found partitions otherwise
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_part_parser_data *data)
{
	struct mtd_partitions pparts = { };
	struct mtd_part_parser *parser;
	int ret, err = 0;

	if (!types)
		types = mtd_is_partition(master) ? default_subpartition_types :
			default_mtd_part_types;

	for ( ; *types; types++) {
		/*
		 * ofpart is a special type that means OF partitioning info
		 * should be used. It requires slightly different logic, so it
		 * is handled in a separate function.
		 */
		if (!strcmp(*types, "ofpart")) {
			ret = mtd_part_of_parse(master, &pparts);
		} else {
			pr_debug("%s: parsing partitions %s\n", master->name,
				 *types);
			parser = mtd_part_parser_get(*types);
			if (!parser && !request_module("%s", *types))
				parser = mtd_part_parser_get(*types);
			pr_debug("%s: got parser %s\n", master->name,
				parser ? parser->name : NULL);
			if (!parser)
				continue;
			ret = mtd_part_do_parse(parser, master, &pparts, data);
			if (ret <= 0)
				mtd_part_parser_put(parser);
		}
		/* Found partitions! */
		if (ret > 0) {
			err = add_mtd_partitions(master, pparts.parts,
						 pparts.nr_parts);
			mtd_part_parser_cleanup(&pparts);
			return err ? err : pparts.nr_parts;
		}
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
	}
	return err;
}

void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return mtd_get_device_size(mtd_to_part(mtd)->parent);
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);