/*
 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>

static LIST_HEAD(blktrans_majors);

extern struct mutex mtd_table_mutex;
extern struct mtd_info *mtd_table[];

struct mtd_blkcore_priv {
	struct completion thread_dead;
	int exiting;
	wait_queue_head_t thread_wq;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

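	/* The block layer hands us 512-byte sectors; convert the start
	   sector and transfer length into device blocks of tr->blksize
	   bytes. */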
	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return 0;

	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;

	daemonize("%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_lock_irq(rq->queue_lock);

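	/* Main service loop: pull requests off the queue under queue_lock,
	   sleeping on thread_wq (woken by mtd_blktrans_request()) whenever
	   the queue is empty. The transfer itself is done with queue_lock
	   dropped and the per-device mutex held. */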
	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}

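/* Block layer request function: it runs with the queue lock held, so it
   only kicks the worker thread, which does the real I/O in process
   context. */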
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up(&tr->blkcore_priv->thread_wq);
}


static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

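	/* Sanity check: the caller is required to hold mtd_table_mutex.
	   If the trylock succeeds, nobody held it, so complain loudly. */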
	if (!!mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	mutex_init(&new->lock);
	list_add_tail(&new->list, &tr->devs);
 added:
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

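	/* Partitionable devices get drive-letter style names (<name>a,
	   <name>b, ..., two letters beyond 'z'); everything else is
	   simply numbered. */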
	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (!!mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}

}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);
	init_completion(&tr->blkcore_priv->thread_dead);
	init_waitqueue_head(&tr->blkcore_priv->thread_wq);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
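	/* blksize is expected to be a power of two, so ffs() - 1 gives
	   log2(blksize); blkshift is used to convert the block layer's
	   512-byte sectors into device blocks. */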
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	tr->blkshift = ffs(tr->blksize) - 1;

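	/* Spawn the worker thread; it detaches itself via daemonize()
	   in mtd_blktrans_thread(). */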
	ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
	if (ret < 0) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	tr->blkcore_priv->exiting = 1;
	wake_up(&tr->blkcore_priv->thread_wq);
	wait_for_completion(&tr->blkcore_priv->thread_dead);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
		tr->remove_dev(dev);
	}

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");