/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/of.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/**
 * struct mtd_part - our partition node structure
 *
 * @mtd: struct holding partition details
 * @parent: parent mtd - flash device or another partition
 * @offset: partition offset relative to the *flash device*
 */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *parent;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure.
 */
static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
{
	return container_of(mtd, struct mtd_part, mtd);
}
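
/*
 * Illustrative sketch (names are hypothetical, not defined in this file):
 * given only a partition's mtd_info, the wrapper and the parent-relative
 * offset are recovered as
 *
 *	struct mtd_part *part = mtd_to_part(mtd);
 *	loff_t parent_from = from + part->offset;
 *
 * which is exactly the translation the pass-through methods below perform
 * before calling into the parent device.
 */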

/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read(part->parent, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_point(part->parent, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_unpoint(part->parent, from + part->offset, len);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->parent->ecc_stats;
	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->parent->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->parent->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_user_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_user_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_read_fact_prot_reg(part->parent, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write(part->parent, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_panic_write(part->parent, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_write_oob(part->parent, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_write_user_prot_reg(part->parent, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock_user_prot_reg(part->parent, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_writev(part->parent, vecs, count,
				     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->parent->_erase(part->parent, instr);
	if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
		instr->fail_addr -= part->offset;
	instr->addr -= part->offset;

	return ret;
}

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_lock(part->parent, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_unlock(part->parent, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_sync(part->parent);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_suspend(part->parent);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_resume(part->parent);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isreserved(part->parent, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	ofs += part->offset;
	return part->parent->_block_isbad(part->parent, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = mtd_to_part(mtd);
	int res;

	ofs += part->offset;
	res = part->parent->_block_markbad(part->parent, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static int part_get_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	return part->parent->_get_device(part->parent);
}

static void part_put_device(struct mtd_info *mtd)
{
	struct mtd_part *part = mtd_to_part(mtd);
	part->parent->_put_device(part->parent);
}

static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_ecc(part->parent, section, oobregion);
}

static int part_ooblayout_free(struct mtd_info *mtd, int section,
			       struct mtd_oob_region *oobregion)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return mtd_ooblayout_free(part->parent, section, oobregion);
}

static const struct mtd_ooblayout_ops part_ooblayout_ops = {
	.ecc = part_ooblayout_ecc,
	.free = part_ooblayout_free,
};

static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = mtd_to_part(mtd);

	return part->parent->_max_bad_blocks(part->parent,
					     ofs + part->offset, len);
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

static struct mtd_part *allocate_partition(struct mtd_info *parent,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
							    parent->erasesize;
	struct mtd_part *slave;
	u32 remainder;
	char *name;
	u64 tmp;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       parent->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = parent->type;
	slave->mtd.flags = parent->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = parent->writesize;
	slave->mtd.writebufsize = parent->writebufsize;
	slave->mtd.oobsize = parent->oobsize;
	slave->mtd.oobavail = parent->oobavail;
	slave->mtd.subpage_sft = parent->subpage_sft;
	slave->mtd.pairing = parent->pairing;

	slave->mtd.name = name;
	slave->mtd.owner = parent->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
				&parent->dev :
				parent->dev.parent;
	slave->mtd.dev.of_node = part->of_node;

	if (parent->_read)
		slave->mtd._read = part_read;
	if (parent->_write)
		slave->mtd._write = part_write;

	if (parent->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (parent->_point && parent->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (parent->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (parent->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (parent->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (parent->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (parent->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (parent->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (parent->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (parent->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (parent->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !parent->dev.class && parent->_suspend &&
	    parent->_resume) {
		slave->mtd._suspend = part_suspend;
		slave->mtd._resume = part_resume;
	}
	if (parent->_writev)
		slave->mtd._writev = part_writev;
	if (parent->_lock)
		slave->mtd._lock = part_lock;
	if (parent->_unlock)
		slave->mtd._unlock = part_unlock;
	if (parent->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (parent->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (parent->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (parent->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	if (parent->_max_bad_blocks)
		slave->mtd._max_bad_blocks = part_max_bad_blocks;

	if (parent->_get_device)
		slave->mtd._get_device = part_get_device;
	if (parent->_put_device)
		slave->mtd._put_device = part_put_device;

	slave->mtd._erase = part_erase;
	slave->parent = parent;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		tmp = cur_offset;
		slave->offset = cur_offset;
		remainder = do_div(tmp, wr_alignment);
		if (remainder) {
			slave->offset += wr_alignment - remainder;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (parent->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = parent->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, parent->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = parent->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= parent->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;

		/* Initialize ->erasesize to make add_mtd_device() happy. */
		slave->mtd.erasesize = parent->erasesize;

		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > parent->size) {
		slave->mtd.size = parent->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, parent->name, (unsigned long long)slave->mtd.size);
	}
	if (parent->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = parent->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = parent->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = parent->erasesize;
	}

	/*
	 * Slave erasesize might differ from the master one if the master
	 * exposes several regions with different erasesize. Adjust
	 * wr_alignment accordingly.
	 */
	if (!(slave->mtd.flags & MTD_NO_ERASE))
		wr_alignment = slave->mtd.erasesize;

	tmp = slave->offset;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
			part->name);
	}

	tmp = slave->mtd.size;
	remainder = do_div(tmp, wr_alignment);
	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
			part->name);
	}

	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
	slave->mtd.ecc_step_size = parent->ecc_step_size;
	slave->mtd.ecc_strength = parent->ecc_strength;
	slave->mtd.bitflip_threshold = parent->bitflip_threshold;

	if (parent->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(parent, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(parent, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = mtd_to_part(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

int mtd_add_partition(struct mtd_info *parent, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = parent->size - offset;

	if (length <= 0)
		return -EINVAL;

	memset(&part, 0, sizeof(part));
	part.name = name;
	part.size = length;
	part.offset = offset;

	new = allocate_partition(parent, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	mtd_add_partition_attrs(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);
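
/*
 * Usage sketch (hypothetical caller, not code from this file): a driver that
 * wants to carve out a runtime partition and later drop it again could do
 *
 *	err = mtd_add_partition(parent_mtd, "example-part", 0, MTDPART_SIZ_FULL);
 *	...
 *	err = mtd_del_partition(parent_mtd, partno);
 *
 * "parent_mtd", "example-part" and "partno" are illustrative names. The
 * offset must be absolute (MTDPART_OFS_APPEND and MTDPART_OFS_NXTBLK are
 * rejected above), while MTDPART_SIZ_FULL may be passed as the length to
 * span the rest of the device.
 */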

/**
 * __mtd_del_partition - delete MTD partition
 *
 * @priv: internal MTD struct for partition to be deleted
 *
 * This function must be called with the partitions mutex locked.
 */
static int __mtd_del_partition(struct mtd_part *priv)
{
	struct mtd_part *child, *next;
	int err;

	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
		if (child->parent == &priv->mtd) {
			err = __mtd_del_partition(child);
			if (err)
				return err;
		}
	}

	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);

	err = del_mtd_device(&priv->mtd);
	if (err)
		return err;

	list_del(&priv->list);
	free_partition(priv);

	return 0;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given MTD object.
 */
int del_mtd_partitions(struct mtd_info *mtd)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->parent == mtd) {
			ret = __mtd_del_partition(slave);
			if (ret < 0)
				err = ret;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

int mtd_del_partition(struct mtd_info *mtd, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->parent == mtd) &&
		    (slave->mtd.index == partno)) {
			ret = __mtd_del_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave)) {
			del_mtd_partitions(master);
			return PTR_ERR(slave);
		}

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);
		mtd_add_partition_attrs(slave);
		/* Look for subpartitions */
		parse_mtd_partitions(&slave->mtd, parts[i].types, NULL);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *mtd_part_parser_get(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
{
	module_put(p->owner);
}

/*
 * Many partition parsers just expected the core to kfree() all their data in
 * one chunk. Do that by default.
 */
static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
					    int nr_parts)
{
	kfree(pparts);
}

int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
{
	p->owner = owner;

	if (!p->cleanup)
		p->cleanup = &mtd_part_parser_cleanup_default;

	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(__register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
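
/*
 * Registration sketch (hypothetical parser, assuming the register_mtd_parser()
 * convenience wrapper from <linux/mtd/partitions.h> that supplies THIS_MODULE
 * as the owner):
 *
 *	static int example_parse_fn(struct mtd_info *master,
 *				    const struct mtd_partition **pparts,
 *				    struct mtd_part_parser_data *data);
 *
 *	static struct mtd_part_parser example_parser = {
 *		.name = "examplepart",
 *		.parse_fn = example_parse_fn,
 *	};
 *
 *	register_mtd_parser(&example_parser);
 *
 * parse_fn is expected to return the number of partitions found (copied into
 * mtd_partitions->nr_parts by mtd_part_do_parse() below) or <= 0 on failure;
 * the default ->cleanup installed above simply kfree()s the returned array.
 */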

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/* Check DT only when looking for subpartitions. */
static const char * const default_subpartition_types[] = {
	"ofpart",
	NULL
};

static int mtd_part_do_parse(struct mtd_part_parser *parser,
			     struct mtd_info *master,
			     struct mtd_partitions *pparts,
			     struct mtd_part_parser_data *data)
{
	int ret;

	ret = (*parser->parse_fn)(master, &pparts->parts, data);
	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
	if (ret <= 0)
		return ret;

	pr_notice("%d %s partitions found on MTD device %s\n", ret,
		  parser->name, master->name);

	pparts->nr_parts = ret;
	pparts->parser = parser;

	return ret;
}

/**
 * mtd_part_get_compatible_parser - find MTD parser by a compatible string
 *
 * @compat: compatible string describing partitions in a device tree
 *
 * MTD parsers can specify supported partitions by providing a table of
 * compatibility strings. This function finds a parser that advertises support
 * for a passed value of "compatible".
 */
static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list) {
		const struct of_device_id *matches;

		matches = p->of_match_table;
		if (!matches)
			continue;

		for (; matches->compatible[0]; matches++) {
			if (!strcmp(matches->compatible, compat) &&
			    try_module_get(p->owner)) {
				ret = p;
				break;
			}
		}

		if (ret)
			break;
	}

	spin_unlock(&part_parser_lock);

	return ret;
}

static int mtd_part_of_parse(struct mtd_info *master,
			     struct mtd_partitions *pparts)
{
	struct mtd_part_parser *parser;
	struct device_node *np;
	struct property *prop;
	const char *compat;
	const char *fixed = "fixed-partitions";
	int ret, err = 0;

	np = mtd_get_of_node(master);
	if (mtd_is_partition(master))
		of_node_get(np);
	else
		np = of_get_child_by_name(np, "partitions");

	of_property_for_each_string(np, "compatible", prop, compat) {
		parser = mtd_part_get_compatible_parser(compat);
		if (!parser)
			continue;
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0) {
			of_node_put(np);
			return ret;
		}
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}
	of_node_put(np);

	/*
	 * For backward compatibility we have to try the "fixed-partitions"
	 * parser. It supports the old DT format with partitions specified as
	 * direct subnodes of a flash device DT node, without any compatible
	 * string we could match.
	 */
	parser = mtd_part_parser_get(fixed);
	if (!parser && !request_module("%s", fixed))
		parser = mtd_part_parser_get(fixed);
	if (parser) {
		ret = mtd_part_do_parse(parser, master, pparts, NULL);
		if (ret > 0)
			return ret;
		mtd_part_parser_put(parser);
		if (ret < 0 && !err)
			err = ret;
	}

	return err;
}

/**
 * parse_mtd_partitions - parse and register MTD partitions
 *
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @data: MTD partition parser-specific data
 *
 * This function tries to find & register partitions on MTD device @master. It
 * uses MTD partition parsers, specified in @types. However, if @types is %NULL,
 * then the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o the number of found partitions otherwise
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_part_parser_data *data)
942
	struct mtd_partitions pparts = { };
L
Linus Torvalds 已提交
943
	struct mtd_part_parser *parser;
944
	int ret, err = 0;
945

946
	if (!types)
947 948
		types = mtd_is_partition(master) ? default_subpartition_types :
			default_mtd_part_types;
949

950
	for ( ; *types; types++) {
951 952 953 954 955 956
		/*
		 * ofpart is a special type that means OF partitioning info
		 * should be used. It requires a bit different logic so it is
		 * handled in a separated function.
		 */
		if (!strcmp(*types, "ofpart")) {
957
			ret = mtd_part_of_parse(master, &pparts);
958 959 960
		} else {
			pr_debug("%s: parsing partitions %s\n", master->name,
				 *types);
961
			parser = mtd_part_parser_get(*types);
962 963 964 965 966 967
			if (!parser && !request_module("%s", *types))
				parser = mtd_part_parser_get(*types);
			pr_debug("%s: got parser %s\n", master->name,
				parser ? parser->name : NULL);
			if (!parser)
				continue;
968
			ret = mtd_part_do_parse(parser, master, &pparts, data);
969 970 971
			if (ret <= 0)
				mtd_part_parser_put(parser);
		}
972
		/* Found partitions! */
973 974 975 976 977 978
		if (ret > 0) {
			err = add_mtd_partitions(master, pparts.parts,
						 pparts.nr_parts);
			mtd_part_parser_cleanup(&pparts);
			return err ? err : pparts.nr_parts;
		}
979 980 981 982 983 984
		/*
		 * Stash the first error we see; only report it if no parser
		 * succeeds
		 */
		if (ret < 0 && !err)
			err = ret;
L
Linus Torvalds 已提交
985
	}
986
	return err;
L
Linus Torvalds 已提交
987
}
988

989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004
void mtd_part_parser_cleanup(struct mtd_partitions *parts)
{
	const struct mtd_part_parser *parser;

	if (!parts)
		return;

	parser = parts->parser;
	if (parser) {
		if (parser->cleanup)
			parser->cleanup(parts->parts, parts->nr_parts);

		mtd_part_parser_put(parser);
	}
}

1005
int mtd_is_partition(const struct mtd_info *mtd)
1006 1007
{
	struct mtd_part *part;
1008
	int ispart = 0;
1009 1010 1011 1012

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
1013
			ispart = 1;
1014 1015 1016 1017
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

1018
	return ispart;
1019
}
1020
EXPORT_SYMBOL_GPL(mtd_is_partition);
1021 1022 1023 1024 1025 1026 1027

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

1028
	return mtd_get_device_size(mtd_to_part(mtd)->parent);
1029 1030
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);