/*
 * Simple MTD partitioning layer
 *
 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/err.h>
#include <linux/kconfig.h>

#include "mtdcore.h"

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
static DEFINE_MUTEX(mtd_partitions_mutex);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;
	struct mtd_info *master;
	uint64_t offset;
	struct list_head list;
};

/*
 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 * the pointer to that structure with this macro.
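 * (The cast is only valid because the mtd member is the first field of
 * struct mtd_part.)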
 */
#define PART(x)  ((struct mtd_part *)(x))


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	struct mtd_ecc_stats stats;
	int res;

	stats = part->master->ecc_stats;
	res = part->master->_read(part->master, from + part->offset, len,
				  retlen, buf);
	if (unlikely(mtd_is_eccerr(res)))
		mtd->ecc_stats.failed +=
			part->master->ecc_stats.failed - stats.failed;
	else
		mtd->ecc_stats.corrected +=
			part->master->ecc_stats.corrected - stats.corrected;
	return res;
}

static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_point(part->master, from + part->offset, len,
				    retlen, virt, phys);
}

static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	return part->master->_unpoint(part->master, from + part->offset, len);
}

static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
					    unsigned long len,
					    unsigned long offset,
					    unsigned long flags)
{
	struct mtd_part *part = PART(mtd);

	offset += part->offset;
	return part->master->_get_unmapped_area(part->master, len, offset,
						flags);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && from + ops->len > mtd->size)
		return -EINVAL;

	/*
	 * If OOB is also requested, make sure that we do not read past the end
	 * of this partition.
	 */
	if (ops->oobbuf) {
		size_t len, pages;

		if (ops->mode == MTD_OPS_AUTO_OOB)
			len = mtd->oobavail;
		else
			len = mtd->oobsize;
		pages = mtd_div_by_ws(mtd->size, mtd);
		pages -= mtd_div_by_ws(from, mtd);
		if (ops->ooboffs + ops->ooblen > pages * len)
			return -EINVAL;
	}

	res = part->master->_read_oob(part->master, from + part->offset, ops);
	if (unlikely(res)) {
		if (mtd_is_bitflip(res))
			mtd->ecc_stats.corrected++;
		if (mtd_is_eccerr(res))
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_user_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_user_prot_info(part->master, len, retlen,
						 buf);
}

static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_read_fact_prot_reg(part->master, from, len,
						 retlen, buf);
}

static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
				   size_t *retlen, struct otp_info *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_get_fact_prot_info(part->master, len, retlen,
						 buf);
}

static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write(part->master, to + part->offset, len,
				    retlen, buf);
}

static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_panic_write(part->master, to + part->offset, len,
					  retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
		struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (to >= mtd->size)
		return -EINVAL;
	if (ops->datbuf && to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->_write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len, size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_write_user_prot_reg(part->master, from, len,
						  retlen, buf);
}

static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
		size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock_user_prot_reg(part->master, from, len);
}

static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
		unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_writev(part->master, vecs, count,
				     to + part->offset, retlen);
}

static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;

	instr->addr += part->offset;
	ret = part->master->_erase(part->master, instr);
	if (ret) {
		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	return ret;
}

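/*
 * Erase-completion helper called by the driver once an erase finishes.  If
 * the request came in through a partition, translate the addresses back to
 * partition-relative values before invoking the caller's callback, so that
 * users of the slave MTD never see master offsets.
 */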
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->_erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_lock(part->master, ofs + part->offset, len);
}

static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_unlock(part->master, ofs + part->offset, len);
}

static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_is_locked(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->_suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->_resume(part->master);
}

static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isreserved(part->master, ofs);
}

static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	ofs += part->offset;
	return part->master->_block_isbad(part->master, ofs);
}

static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	ofs += part->offset;
	res = part->master->_block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

static inline void free_partition(struct mtd_part *p)
{
	kfree(p->mtd.name);
	kfree(p);
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct mtd_part *slave, *next;
	int ret, err = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if (slave->master == master) {
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0) {
				err = ret;
				continue;
			}
			list_del(&slave->list);
			free_partition(slave);
		}
	mutex_unlock(&mtd_partitions_mutex);

	return err;
}

static struct mtd_part *allocate_partition(struct mtd_info *master,
			const struct mtd_partition *part, int partno,
			uint64_t cur_offset)
{
	struct mtd_part *slave;
	char *name;

	/* allocate the partition structure */
	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	name = kstrdup(part->name, GFP_KERNEL);
	if (!name || !slave) {
		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
		       master->name);
		kfree(name);
		kfree(slave);
		return ERR_PTR(-ENOMEM);
	}

	/* set up the MTD object for this partition */
	slave->mtd.type = master->type;
	slave->mtd.flags = master->flags & ~part->mask_flags;
	slave->mtd.size = part->size;
	slave->mtd.writesize = master->writesize;
	slave->mtd.writebufsize = master->writebufsize;
	slave->mtd.oobsize = master->oobsize;
	slave->mtd.oobavail = master->oobavail;
	slave->mtd.subpage_sft = master->subpage_sft;

	slave->mtd.name = name;
	slave->mtd.owner = master->owner;

	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
	 * concern for showing the same data in multiple partitions.
	 * However, it is very useful to have the master node present,
	 * so the MTD_PARTITIONED_MASTER option allows that. The master
	 * will have device nodes etc only if this is set, so make the
	 * parent conditional on that option. Note, this is a way to
	 * distinguish between the master and the partition in sysfs.
	 */
	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ?
				&master->dev :
				master->dev.parent;

	slave->mtd._read = part_read;
	slave->mtd._write = part_write;

	if (master->_panic_write)
		slave->mtd._panic_write = part_panic_write;

	if (master->_point && master->_unpoint) {
		slave->mtd._point = part_point;
		slave->mtd._unpoint = part_unpoint;
	}

	if (master->_get_unmapped_area)
		slave->mtd._get_unmapped_area = part_get_unmapped_area;
	if (master->_read_oob)
		slave->mtd._read_oob = part_read_oob;
	if (master->_write_oob)
		slave->mtd._write_oob = part_write_oob;
	if (master->_read_user_prot_reg)
		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
	if (master->_read_fact_prot_reg)
		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
	if (master->_write_user_prot_reg)
		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
	if (master->_lock_user_prot_reg)
		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
	if (master->_get_user_prot_info)
		slave->mtd._get_user_prot_info = part_get_user_prot_info;
	if (master->_get_fact_prot_info)
		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
	if (master->_sync)
		slave->mtd._sync = part_sync;
	if (!partno && !master->dev.class && master->_suspend &&
	    master->_resume) {
			slave->mtd._suspend = part_suspend;
			slave->mtd._resume = part_resume;
	}
	if (master->_writev)
		slave->mtd._writev = part_writev;
	if (master->_lock)
		slave->mtd._lock = part_lock;
	if (master->_unlock)
		slave->mtd._unlock = part_unlock;
	if (master->_is_locked)
		slave->mtd._is_locked = part_is_locked;
	if (master->_block_isreserved)
		slave->mtd._block_isreserved = part_block_isreserved;
	if (master->_block_isbad)
		slave->mtd._block_isbad = part_block_isbad;
	if (master->_block_markbad)
		slave->mtd._block_markbad = part_block_markbad;
	slave->mtd._erase = part_erase;
	slave->master = master;
	slave->offset = part->offset;

	if (slave->offset == MTDPART_OFS_APPEND)
		slave->offset = cur_offset;
	if (slave->offset == MTDPART_OFS_NXTBLK) {
		slave->offset = cur_offset;
		if (mtd_mod_by_eb(cur_offset, master) != 0) {
			/* Round up to next erasesize */
			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
			printk(KERN_NOTICE "Moving partition %d: "
			       "0x%012llx -> 0x%012llx\n", partno,
			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
		}
	}
	if (slave->offset == MTDPART_OFS_RETAIN) {
		slave->offset = cur_offset;
		if (master->size - slave->offset >= slave->mtd.size) {
			slave->mtd.size = master->size - slave->offset
							- slave->mtd.size;
		} else {
			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
				part->name, master->size - slave->offset,
				slave->mtd.size);
			/* register to preserve ordering */
			goto out_register;
		}
	}
	if (slave->mtd.size == MTDPART_SIZ_FULL)
		slave->mtd.size = master->size - slave->offset;

	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);

	/* let's do some sanity checks */
	if (slave->offset >= master->size) {
		/* let's register it anyway to preserve ordering */
		slave->offset = 0;
		slave->mtd.size = 0;
		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
			part->name);
		goto out_register;
	}
	if (slave->offset + slave->mtd.size > master->size) {
		slave->mtd.size = master->size - slave->offset;
		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
			part->name, master->name, (unsigned long long)slave->mtd.size);
	}
	if (master->numeraseregions > 1) {
		/* Deal with variable erase size stuff */
		int i, max = master->numeraseregions;
		u64 end = slave->offset + slave->mtd.size;
		struct mtd_erase_region_info *regions = master->eraseregions;

		/* Find the first erase region which is part of this
		 * partition. */
		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
			;
		/* The loop searched for the region _behind_ the first one */
		if (i > 0)
			i--;

		/* Pick biggest erasesize */
		for (; i < max && regions[i].offset < end; i++) {
			if (slave->mtd.erasesize < regions[i].erasesize) {
				slave->mtd.erasesize = regions[i].erasesize;
			}
		}
		BUG_ON(slave->mtd.erasesize == 0);
	} else {
		/* Single erase size */
		slave->mtd.erasesize = master->erasesize;
	}

	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
		/* Doesn't start on a boundary of major erase size */
		/* FIXME: Let it be writable if it is on a boundary of
		 * _minor_ erase size though */
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
			part->name);
	}
	if ((slave->mtd.flags & MTD_WRITEABLE) &&
	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
		slave->mtd.flags &= ~MTD_WRITEABLE;
		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
			part->name);
	}

	slave->mtd.ecclayout = master->ecclayout;
	slave->mtd.ecc_step_size = master->ecc_step_size;
	slave->mtd.ecc_strength = master->ecc_strength;
	slave->mtd.bitflip_threshold = master->bitflip_threshold;

	if (master->_block_isbad) {
		uint64_t offs = 0;

		while (offs < slave->mtd.size) {
			if (mtd_block_isreserved(master, offs + slave->offset))
				slave->mtd.ecc_stats.bbtblocks++;
			else if (mtd_block_isbad(master, offs + slave->offset))
				slave->mtd.ecc_stats.badblocks++;
			offs += slave->mtd.erasesize;
		}
	}

out_register:
	return slave;
}

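/*
 * Expose the partition's start offset within the master device as a
 * read-only sysfs attribute named "offset" on the partition's mtd device
 * (e.g. /sys/class/mtd/mtdX/offset).
 */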
static ssize_t mtd_partition_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);
	struct mtd_part *part = PART(mtd);
	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
}

static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);

static const struct attribute *mtd_partition_attrs[] = {
	&dev_attr_offset.attr,
	NULL
};

static int mtd_add_partition_attrs(struct mtd_part *new)
{
	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
	if (ret)
		printk(KERN_WARNING
		       "mtd: failed to create partition attrs, err=%d\n", ret);
	return ret;
}

int mtd_add_partition(struct mtd_info *master, const char *name,
		      long long offset, long long length)
{
	struct mtd_partition part;
	struct mtd_part *new;
	int ret = 0;

	/* the direct offset is expected */
	if (offset == MTDPART_OFS_APPEND ||
	    offset == MTDPART_OFS_NXTBLK)
		return -EINVAL;

	if (length == MTDPART_SIZ_FULL)
		length = master->size - offset;

	if (length <= 0)
		return -EINVAL;

	part.name = name;
	part.size = length;
	part.offset = offset;
	part.mask_flags = 0;
	part.ecclayout = NULL;

	new = allocate_partition(master, &part, -1, offset);
	if (IS_ERR(new))
		return PTR_ERR(new);

	mutex_lock(&mtd_partitions_mutex);
	list_add(&new->list, &mtd_partitions);
	mutex_unlock(&mtd_partitions_mutex);

	add_mtd_device(&new->mtd);

	mtd_add_partition_attrs(new);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_add_partition);

int mtd_del_partition(struct mtd_info *master, int partno)
{
	struct mtd_part *slave, *next;
	int ret = -EINVAL;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
		if ((slave->master == master) &&
		    (slave->mtd.index == partno)) {
			sysfs_remove_files(&slave->mtd.dev.kobj,
					   mtd_partition_attrs);
			ret = del_mtd_device(&slave->mtd);
			if (ret < 0)
				break;

			list_del(&slave->list);
			free_partition(slave);
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mtd_del_partition);
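
/*
 * Illustrative use of the two exported helpers above (the caller, the
 * "scratch" name and the offset/size values are made up):
 *
 *	err = mtd_add_partition(master, "scratch", 0x100000, 0x80000);
 *	...
 *	err = mtd_del_partition(master, scratch_partno);
 *
 * where scratch_partno is the mtd index that was assigned to the new
 * partition.
 */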

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 *
 * For historical reasons, this function's caller only registers the master
 * if the MTD_PARTITIONED_MASTER config option is set.
 */
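
/*
 * Purely illustrative layout (names and sizes invented): board or driver
 * code usually describes its partitions with a static table like the one
 * below and hands it to mtd_device_register()/mtd_device_parse_register(),
 * which end up calling add_mtd_partitions().
 *
 *	static const struct mtd_partition example_parts[] = {
 *		{
 *			.name	= "bootloader",
 *			.offset	= 0,
 *			.size	= 0x40000,
 *		}, {
 *			.name	= "kernel",
 *			.offset	= MTDPART_OFS_APPEND,
 *			.size	= 0x400000,
 *		}, {
 *			.name	= "rootfs",
 *			.offset	= MTDPART_OFS_APPEND,
 *			.size	= MTDPART_SIZ_FULL,
 *		},
 *	};
 */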

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	uint64_t cur_offset = 0;
	int i;

	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {
		slave = allocate_partition(master, parts + i, i, cur_offset);
		if (IS_ERR(slave))
			return PTR_ERR(slave);

		mutex_lock(&mtd_partitions_mutex);
		list_add(&slave->list, &mtd_partitions);
		mutex_unlock(&mtd_partitions_mutex);

		add_mtd_device(&slave->mtd);
		mtd_add_partition_attrs(slave);

		cur_offset = slave->offset + slave->mtd.size;
	}

	return 0;
}

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct mtd_part_parser *p, *ret = NULL;

	spin_lock(&part_parser_lock);

	list_for_each_entry(p, &part_parsers, list)
		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}

	spin_unlock(&part_parser_lock);

	return ret;
}

#define put_partition_parser(p) do { module_put((p)->owner); } while (0)

void register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(register_mtd_parser);

void deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
}
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
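
/*
 * Minimal sketch of a parser as seen by the two hooks above (illustrative
 * only; the "example" name and example_parse() are invented).  The parse_fn
 * callback is expected to allocate an array of struct mtd_partition, store
 * it in *pparts and return the number of partitions it found:
 *
 *	static struct mtd_part_parser example_parser = {
 *		.owner    = THIS_MODULE,
 *		.name     = "example",
 *		.parse_fn = example_parse,
 *	};
 *
 *	register_mtd_parser(&example_parser);
 */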

/*
 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 * are changing this array!
 */
static const char * const default_mtd_part_types[] = {
	"cmdlinepart",
	"ofpart",
	NULL
};

/**
 * parse_mtd_partitions - parse MTD partitions
 * @master: the master partition (describes whole MTD device)
 * @types: names of partition parsers to try or %NULL
 * @pparts: array of partitions found is returned here
 * @data: MTD partition parser-specific data
 *
 * This function tries to find partitions on MTD device @master. It uses MTD
 * partition parsers, specified in @types. However, if @types is %NULL, then
 * the default list of parsers is used. The default list contains only the
 * "cmdlinepart" and "ofpart" parsers ATM.
 * Note: If there is more than one parser in @types, the kernel only takes the
 * partitions parsed out by the first parser that finds any.
 *
 * This function may return:
 * o a negative error code in case of failure
 * o zero if no partitions were found
 * o a positive number of found partitions, in which case on exit @pparts will
 *   point to an array containing this number of &struct mtd_partition objects.
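 *
 * A rough usage sketch (this mirrors, in simplified form, what
 * mtd_device_parse_register() in mtdcore.c does with the result):
 *
 *	ret = parse_mtd_partitions(mtd, types, &parts, parser_data);
 *	if (ret > 0)
 *		add_mtd_partitions(mtd, parts, ret);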
 */
int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	if (!types)
		types = default_mtd_part_types;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
		if (!parser)
			continue;
		ret = (*parser->parse_fn)(master, pparts, data);
		put_partition_parser(parser);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
			break;
		}
	}
	return ret;
}

int mtd_is_partition(const struct mtd_info *mtd)
{
	struct mtd_part *part;
	int ispart = 0;

	mutex_lock(&mtd_partitions_mutex);
	list_for_each_entry(part, &mtd_partitions, list)
		if (&part->mtd == mtd) {
			ispart = 1;
			break;
		}
	mutex_unlock(&mtd_partitions_mutex);

	return ispart;
}
EXPORT_SYMBOL_GPL(mtd_is_partition);

/* Returns the size of the entire flash chip */
uint64_t mtd_get_device_size(const struct mtd_info *mtd)
{
	if (!mtd_is_partition(mtd))
		return mtd->size;

	return PART(mtd)->master->size;
}
EXPORT_SYMBOL_GPL(mtd_get_device_size);