/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;

	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	res = part->master->read(part->master, from + part->offset,
				   len, retlen, buf);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
	}
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	return mtd_point(part->master, from + part->offset, len, retlen,
			 virt, phys);
}

static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	mtd_unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return mtd_get_unmapped_area(part->master, len, offset, flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd,
		struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_user_prot_info(part->master, buf, len);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_fact_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_fact_prot_info(part->master, buf, len);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->write(part->master, to + part->offset,
				    len, retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->panic_write(part->master, to + part->offset,
				    len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->write_user_prot_reg(part->master, from,
					len, retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return part->master->writev(part->master, vecs, count,
					to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (instr->addr >= mtd->size)
		return -EINVAL;
	instr->addr += part->offset;
	ret = mtd_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->resume(part->master);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	res = part->master->block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;
	slave->mtd.backing_dev_info = master->backing_dev_info;

	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
	 * to have the same data be in two different partitions.
	 */
	slave->mtd.dev.parent = master->dev.parent;

	slave->mtd.read = part_read;
	slave->mtd.write = part_write;

	if (master->panic_write)
		slave->mtd.panic_write = part_panic_write;

	if (master->point && master->unpoint) {
		slave->mtd.point = part_point;
		slave->mtd.unpoint = part_unpoint;
	}

	if (master->get_unmapped_area)
		slave->mtd.get_unmapped_area = part_get_unmapped_area;
	if (master->read_oob)
		slave->mtd.read_oob = part_read_oob;
	if (master->write_oob)
		slave->mtd.write_oob = part_write_oob;
	if (master->read_user_prot_reg)
		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
	if (master->read_fact_prot_reg)
		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->write_user_prot_reg)
		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
	if (master->lock_user_prot_reg)
		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->get_user_prot_info)
		slave->mtd.get_user_prot_info = part_get_user_prot_info;
	if (master->get_fact_prot_info)
		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
	if (master->sync)
		slave->mtd.sync = part_sync;
	if (!partno && !master->dev.class && master->suspend && master->resume) {
			slave->mtd.suspend = part_suspend;
			slave->mtd.resume = part_resume;
	}
	if (master->writev)
		slave->mtd.writev = part_writev;
	if (master->lock)
		slave->mtd.lock = part_lock;
	if (master->unlock)
		slave->mtd.unlock = part_unlock;
	if (master->is_locked)
		slave->mtd.is_locked = part_is_locked;
	if (master->block_isbad)
		slave->mtd.block_isbad = part_block_isbad;
	if (master->block_markbad)
		slave->mtd.block_markbad = part_block_markbad;
	slave->mtd.erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	if (master->block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (master->block_isbad(master,
						offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

int mtd_add_partition(struct mtd_info *master, char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *p, *new;
	uint64_t start, end;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	start = offset;
	end = offset + length;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(p, &mtd_partitions, list)
		if (p->master == master) {
			if ((start >= p->offset) &&
			    (start < (p->offset + p->mtd.size)))
				goto err_inv;

			if ((end >= p->offset) &&
			    (end < (p->offset + p->mtd.size)))
				goto err_inv;
		}

	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	return ret;
err_inv:
	mutex_unlock(&mtd_partitions_mutex);
	free_partition(new);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
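
/*
 * Example (not part of the original file): a minimal sketch of how a caller
 * might use the dynamic partition API above.  The partition name, offset,
 * size and slave index below are invented for illustration only.
 */
#if 0	/* usage sketch, kept out of the build */
static int example_dynamic_partition(struct mtd_info *master)
{
	int err;

	/* carve a 1 MiB partition called "scratch" at offset 4 MiB */
	err = mtd_add_partition(master, "scratch", 4 * 1024 * 1024, 1024 * 1024);
	if (err)
		return err;

	/* ... and later remove it again via the index of its slave mtd_info */
	return mtd_del_partition(master, /* partno: hypothetical */ 3);
}
#endif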

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * We don't register the master, or expect the caller to have done so,
 * for reasons of data integrity.
 */

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}
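
/*
 * Example (not part of the original file): a hypothetical board or flash
 * driver could describe a fixed layout and hand it to add_mtd_partitions()
 * once the master device has been probed.  All names, offsets and sizes
 * below are invented for illustration only.
 */
#if 0	/* usage sketch, kept out of the build */
static const struct mtd_partition example_parts[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= 256 * 1024,
		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_NXTBLK,	/* next erase block */
		.size		= 4 * 1024 * 1024,
	}, {
		.name		= "rootfs",
		.offset		= MTDPART_OFS_APPEND,	/* right after "kernel" */
		.size		= MTDPART_SIZ_FULL,	/* rest of the device */
	},
};

/* typically called from the flash driver's probe path */
static void example_register_static_layout(struct mtd_info *master)
{
	add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts));
}
#endif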

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_mtd_parser);
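
/*
 * Example (not part of the original file): a minimal sketch of a partition
 * parser.  "examplepart" and its one-partition layout are invented for
 * illustration; real parsers (e.g. cmdlinepart, ofpart) live in their own
 * modules.
 */
#if 0	/* usage sketch, kept out of the build */
static int examplepart_parse(struct mtd_info *master,
			     struct mtd_partition **pparts,
			     struct mtd_part_parser_data *data)
{
	struct mtd_partition *parts;

	/* the parser allocates the table; the caller owns and frees it */
	parts = kzalloc(sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	parts[0].name = "whole-flash";
	parts[0].offset = 0;
	parts[0].size = MTDPART_SIZ_FULL;

	*pparts = parts;
	return 1;	/* number of partitions found */
}

static struct mtd_part_parser examplepart_parser = {
	.owner		= THIS_MODULE,
	.parse_fn	= examplepart_parse,
	.name		= "examplepart",
};

/* typically done from the parser module's init function */
register_mtd_parser(&examplepart_parser);
#endif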

int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char *default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
 */
int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
				parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}
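
/*
 * Example (not part of the original file): a rough sketch of how a flash
 * driver might combine parse_mtd_partitions() with add_mtd_partitions().
 * Error handling is trimmed; the NULL arguments mean "default parsers" and
 * "no parser data".
 */
#if 0	/* usage sketch, kept out of the build */
static void example_register_parsed_partitions(struct mtd_info *master)
{
	struct mtd_partition *parts;
	int nr_parts;

	nr_parts = parse_mtd_partitions(master, NULL, &parts, NULL);
	if (nr_parts > 0) {
		add_mtd_partitions(master, parts, nr_parts);
		kfree(parts);	/* the slaves keep copies of what they need */
	}
}
#endif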

int mtd_is_partition(struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);