/*
 * Direct MTD block device access
 *
 * (C) 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>

/*
 * Per-device cache state, one entry per MTD device number.  An entry is
 * non-NULL only while the corresponding mtdblock device is open.
 */
static struct mtdblk_dev {
	struct mtd_info *mtd;		/* underlying MTD device */
	int count;			/* open reference count */
	struct mutex cache_mutex;	/* protects the cache_* fields below */
	unsigned char *cache_data;	/* erase-block buffer (vmalloc'ed lazily) */
	unsigned long cache_offset;	/* device offset the cache mirrors */
	unsigned int cache_size;	/* erase-block size; 0 = no caching (write-through) */
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES];

/* Serializes open/release manipulation of the mtdblks[] table. */
static struct mutex mtdblks_lock;

/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request.  To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */

/* Erase-completion callback: wakes the sleeper set up in erase_write(). */
static void erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

/*
 * Erase the flash region [pos, pos + len) and then write 'len' bytes from
 * 'buf' into it.  Sleeps until the asynchronous erase completes.
 * Returns 0 on success or a negative error code.
 */
static int erase_write (struct mtd_info *mtd, unsigned long pos,
			int len, const char *buf)
{
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = erase_callback;
	erase.addr = pos;
	erase.len = len;
	erase.priv = (u_long)&wait_q;

	/*
	 * Queue ourselves on the wait queue *before* starting the erase,
	 * so the completion wake-up cannot be missed.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		/* Erase never started: undo the wait-queue setup. */
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
				     "on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/*
	 * Next, write the data to flash.
	 */

	ret = mtd->write(mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;	/* short write: report as I/O error */
	return 0;
}


/*
 * Flush the cached erase block back to flash if it is dirty.
 * Must be called with mtdblk->cache_mutex held.
 * Returns 0 on success or a negative error code from erase_write().
 */
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
			"at 0x%lx, size 0x%x\n", mtd->name,
			mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write (mtd, mtdblk->cache_offset,
			   mtdblk->cache_size, mtdblk->cache_data);
	if (ret)
		return ret;

	/*
	 * Here we could arguably set the cache state to STATE_CLEAN.
	 * However this could lead to inconsistency since we will not
	 * be notified if this content is altered on the flash by other
	 * means.  Let's declare it empty and leave buffering tasks to
	 * the buffer cache instead.
	 */
	mtdblk->cache_state = STATE_EMPTY;
	return 0;
}


/*
 * Write 'len' bytes from 'buf' to device offset 'pos', using the
 * per-device erase-block cache to implement read-modify-write for
 * partial sectors.  Whole-sector writes bypass the cache.
 * Caller must hold mtdblk->cache_mutex.  Returns 0 or a negative errno.
 */
static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
			    int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
		mtd->name, pos, len);

	/* No caching configured (e.g. no erase needed): write through. */
	if (!sect_size)
		return mtd->write(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if( size > len )
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector.  Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write (mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			/* Flush first if the cache holds a different dirty sector. */
			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = mtd->read(mtd, sect_start, sect_size,
						&retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy (mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}


/*
 * Read 'len' bytes at device offset 'pos' into 'buf'.  Data that overlaps
 * the cached (possibly dirty) erase block is served from the cache so
 * reads observe the latest writes; everything else is read from flash.
 * Caller must hold mtdblk->cache_mutex.  Returns 0 or a negative errno.
 */
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
			mtd->name, pos, len);

	/* No caching configured: read straight from the device. */
	if (!sect_size)
		return mtd->read(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached
		 * Read the requested amount of data from our internal cache if it
		 * contains what we want, otherwise we read the data directly
		 * from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy (buf, mtdblk->cache_data + offset, size);
		} else {
			ret = mtd->read(mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
	return do_cached_read(mtdblk, block<<9, 512, buf);
}

/* blktrans sector-write hook: write one 512-byte sector via the cache. */
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];
	/* Allocate the erase-block cache lazily, on the first write. */
	if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
		mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize);
		if (!mtdblk->cache_data)
			return -EINTR;
		/* -EINTR is not really correct, but it is the best match
		 * documented in man 2 write for all cases.  We could also
		 * return -EAGAIN sometimes, but why bother?
		 */
	}
	return do_cached_write(mtdblk, block<<9, 512, buf);
}

/*
 * blktrans open hook: create the per-device cache state on first open,
 * or just take another reference if the device is already open.
 * Returns 0 on success, -ENOMEM if the state cannot be allocated.
 */
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk;
	struct mtd_info *mtd = mbd->mtd;
	int dev = mbd->devnum;

	DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");

	mutex_lock(&mtdblks_lock);
	if (mtdblks[dev]) {
		/* Already open: bump the reference count and we're done. */
		mtdblks[dev]->count++;
		mutex_unlock(&mtdblks_lock);
		return 0;
	}

	/* OK, it's not open. Create cache info for it */
	mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
	if (!mtdblk) {
		mutex_unlock(&mtdblks_lock);
		return -ENOMEM;
	}

	mtdblk->count = 1;
	mtdblk->mtd = mtd;

	mutex_init(&mtdblk->cache_mutex);
	mtdblk->cache_state = STATE_EMPTY;
	/* Only enable caching for devices that need erase cycles;
	 * the buffer itself is vmalloc'ed lazily in mtdblock_writesect(). */
	if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
		mtdblk->cache_size = mtdblk->mtd->erasesize;
		mtdblk->cache_data = NULL;
	}

	mtdblks[dev] = mtdblk;
	mutex_unlock(&mtdblks_lock);

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	return 0;
}

/*
 * blktrans release hook: flush any dirty cached data and drop one
 * reference; the last release syncs the MTD device and frees the state.
 */
static int mtdblock_release(struct mtd_blktrans_dev *mbd)
{
	int dev = mbd->devnum;
	struct mtdblk_dev *mtdblk = mtdblks[dev];

	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

	mutex_lock(&mtdblks_lock);

	mutex_lock(&mtdblk->cache_mutex);
	write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	if (!--mtdblk->count) {
		/* It was the last usage. Free the device */
		mtdblks[dev] = NULL;
		if (mtdblk->mtd->sync)
			mtdblk->mtd->sync(mtdblk->mtd);
		vfree(mtdblk->cache_data);
		kfree(mtdblk);
	}

	mutex_unlock(&mtdblks_lock);

	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

	return 0;
}

/*
 * blktrans flush hook: push dirty cached data out to flash, then sync
 * the underlying MTD device if it supports it.
 */
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdblk_dev *mtdblk = mtdblks[dev->devnum];

	mutex_lock(&mtdblk->cache_mutex);
	write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	if (mtdblk->mtd->sync)
		mtdblk->mtd->sync(mtdblk->mtd);
	return 0;
}

static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
351
	struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
L
Linus Torvalds 已提交
352 353 354 355 356 357

	if (!dev)
		return;

	dev->mtd = mtd;
	dev->devnum = mtd->index;
358

L
Linus Torvalds 已提交
359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377
	dev->size = mtd->size >> 9;
	dev->tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		dev->readonly = 1;

	add_mtd_blktrans_dev(dev);
}

/* blktrans hook: unregister the translation device and free it. */
static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
	kfree(dev);
}

/* Block-translation operations: "mtdblock", major 31, 512-byte sectors. */
static struct mtd_blktrans_ops mtdblock_tr = {
	.name		= "mtdblock",
	.major		= 31,
	.part_bits	= 0,
	.blksize 	= 512,
	.open		= mtdblock_open,
	.flush		= mtdblock_flush,
	.release	= mtdblock_release,
	.readsect	= mtdblock_readsect,
	.writesect	= mtdblock_writesect,
	.add_mtd	= mtdblock_add_mtd,
	.remove_dev	= mtdblock_remove_dev,
	.owner		= THIS_MODULE,
};

/* Module init: set up the table lock and register the block translation. */
static int __init init_mtdblock(void)
{
	mutex_init(&mtdblks_lock);

	return register_mtd_blktrans(&mtdblock_tr);
}

/* Module exit: unregister the block translation layer. */
static void __exit cleanup_mtdblock(void)
{
	deregister_mtd_blktrans(&mtdblock_tr);
}

module_init(init_mtdblock);
module_exit(cleanup_mtdblock);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");