/*
 * MTD Oops/Panic logger
 *
 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define MTDOOPS_HEADER_SIZE   8

static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"record size for MTD OOPS pages in bytes (default 4096)");

static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
		"name or index number of the MTD device to use");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");

static struct mtdoops_context {
	struct kmsg_dumper dump;

	int mtd_index;
	struct work_struct work_erase;
	struct work_struct work_write;
	struct mtd_info *mtd;
	int oops_pages;
	int nextpage;
	int nextcount;
	unsigned long *oops_page_used;

	void *oops_buf;
} oops_cxt;

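/*
 * Bookkeeping for the oops_page_used bitmap: one bit per record-size
 * page, set while the page holds a saved record and cleared again when
 * its eraseblock is wiped.
 */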
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
	set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
	clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
	return test_bit(page, cxt->oops_page_used);
}

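/* Erase-completion callback: wakes the waiter in mtdoops_erase_block() */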
static void mtdoops_erase_callback(struct erase_info *done)
{
	wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
	wake_up(wait_q);
}

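/*
 * Synchronously erase the eraseblock containing @offset: queue the erase,
 * sleep until mtdoops_erase_callback() wakes us, then mark every record
 * page inside the block as unused again.
 */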
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
	u32 start_page = start_page_offset / record_size;
	u32 erase_pages = mtd->erasesize / record_size;
	struct erase_info erase;
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	int ret;
	int page;

	init_waitqueue_head(&wait_q);
	erase.mtd = mtd;
	erase.callback = mtdoops_erase_callback;
	erase.addr = offset;
	erase.len = mtd->erasesize;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, mtddev);
		return ret;
	}

	schedule();  /* Wait for erase to finish. */
	remove_wait_queue(&wait_q, &wait);

	/* Mark pages as unused */
	for (page = start_page; page < start_page + erase_pages; page++)
		mark_page_unused(cxt, page);

	return 0;
}

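/*
 * Advance nextpage/nextcount to the following record slot, wrapping at the
 * end of the device.  If the new slot still holds an old record it must be
 * erased before it can be reused, so the erase work is scheduled here.
 */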
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
	cxt->nextpage++;
	if (cxt->nextpage >= cxt->oops_pages)
		cxt->nextpage = 0;
	cxt->nextcount++;
	if (cxt->nextcount == 0xffffffff)
		cxt->nextcount = 0;

	if (page_is_used(cxt, cxt->nextpage)) {
		schedule_work(&cxt->work_erase);
		return;
	}

	printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
	       cxt->nextpage, cxt->nextcount);
}

/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	/* Round nextpage up to the start of the next eraseblock */
	mod = (cxt->nextpage * record_size) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	/* Skip bad blocks, wrapping around; give up if every block is bad */
	while (mtd->block_isbad) {
		ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
		if (!ret)
			break;
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
			return;
		}
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
		       cxt->nextpage * record_size);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	/* Retry the erase up to three times before treating the block as bad */
	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		return;
	}

	if (mtd->block_markbad && ret == -EIO) {
		ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	/* Mark the block bad (if we could) and move on to the next one */
	goto badblock;
}

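/*
 * Write out one record.  The on-flash layout is an 8-byte header of two
 * native-endian u32s - the sequence counter and MTDOOPS_KERNMSG_MAGIC -
 * followed by the captured kernel messages.  In the panic case we must use
 * mtd->panic_write(), as the normal write path may sleep.
 */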
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	u32 *hdr;
	int ret;

	/* Add mtdoops header to the buffer */
	hdr = cxt->oops_buf;
	hdr[0] = cxt->nextcount;
	hdr[1] = MTDOOPS_KERNMSG_MAGIC;

	if (panic)
		ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
					record_size, &retlen, cxt->oops_buf);
	else
		ret = mtd->write(mtd, cxt->nextpage * record_size,
					record_size, &retlen, cxt->oops_buf);

	if (retlen != record_size || ret < 0)
		printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
		       cxt->nextpage * record_size, retlen, record_size, ret);
	mark_page_used(cxt, cxt->nextpage);
	memset(cxt->oops_buf, 0xff, record_size);

	mtdoops_inc_counter(cxt);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}

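/*
 * Scan every record header on the device to find the most recent record,
 * i.e. the one with the highest sequence counter, taking counter wraparound
 * into account.  Writing then resumes in the slot after it.
 */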
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		/* Assume the page is used */
		mark_page_used(cxt, page);
		ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
				&retlen, (u_char *) &count[0]);
		if (retlen != MTDOOPS_HEADER_SIZE ||
				(ret < 0 && ret != -EUCLEAN)) {
			printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
			       page * record_size, retlen,
			       MTDOOPS_HEADER_SIZE, ret);
			continue;
		}

		if (count[0] == 0xffffffff && count[1] == 0xffffffff)
			mark_page_unused(cxt, page);
		if (count[0] == 0xffffffff)
			continue;
		if (maxcount == 0xffffffff) {
			/* First used page found */
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
			/* The counter has wrapped: a low count is newer */
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] > 0xc0000000
					&& maxcount > 0x80000000) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		/* The device holds no records yet: start from scratch */
		cxt->nextpage = 0;
		cxt->nextcount = 1;
		schedule_work(&cxt->work_erase);
		return;
	}

	cxt->nextpage = maxpos;
	cxt->nextcount = maxcount;

	mtdoops_inc_counter(cxt);
}

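/*
 * kmsg_dump callback.  The log is handed to us as two segments (s1/s2);
 * keep as much of the tail of each as fits in the record after the header.
 * Panics are written immediately, since no other context will get the
 * chance, while oopses are deferred to the write work.
 */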
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
		enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
		const char *s2, unsigned long l2)
{
	struct mtdoops_context *cxt = container_of(dumper,
			struct mtdoops_context, dump);
	unsigned long s1_start, s2_start;
	unsigned long l1_cpy, l2_cpy;
	char *dst;

	/* Only dump oopses if dump_oops is set */
	if (reason == KMSG_DUMP_OOPS && !dump_oops)
		return;

	dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
	l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
	l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);

	s2_start = l2 - l2_cpy;
	s1_start = l1 - l1_cpy;

	memcpy(dst, s1 + s1_start, l1_cpy);
	memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);

	/* Panics must be written immediately */
	if (reason == KMSG_DUMP_PANIC) {
		if (!cxt->mtd->panic_write)
			printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
		else
			mtdoops_write(cxt, 1);
		return;
	}

	/* For other cases, schedule work to write it "nicely" */
	schedule_work(&cxt->work_write);
}
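
/*
 * MTD add notifier: claim the device that matches the mtddev parameter
 * (by name or index), check that its geometry can hold at least two
 * eraseblocks of records, then allocate the page-used bitmap and register
 * the kmsg dumper.
 */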
static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;
	u64 mtdoops_pages = div_u64(mtd->size, record_size);
	int err;

	if (!strcmp(mtd->name, mtddev))
		cxt->mtd_index = mtd->index;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (mtd->size < mtd->erasesize * 2) {
		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
		       mtd->index);
		return;
	}
	if (mtd->erasesize < record_size) {
		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
		       mtd->index);
		return;
	}
	if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
		printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
		       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
		return;
	}

	/* oops_page_used is a bit field */
	cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
			BITS_PER_LONG) * sizeof(unsigned long));
	if (!cxt->oops_page_used) {
		printk(KERN_ERR "mtdoops: could not allocate page array\n");
		return;
	}

	cxt->dump.dump = mtdoops_do_dump;
	err = kmsg_dump_register(&cxt->dump);
	if (err) {
		printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
		vfree(cxt->oops_page_used);
		cxt->oops_page_used = NULL;
		return;
	}

	cxt->mtd = mtd;
	cxt->oops_pages = (int)mtd->size / record_size;
	find_next_position(cxt);
	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (kmsg_dump_unregister(&cxt->dump) < 0)
		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");

	cxt->mtd = NULL;
	flush_scheduled_work();
}


static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};

static int __init mtdoops_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	int mtd_index;
	char *endp;

	if (strlen(mtddev) == 0) {
		printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
		return -EINVAL;
	}
	if ((record_size & 4095) != 0) {
		printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
		return -EINVAL;
	}
	if (record_size < 4096) {
		printk(KERN_ERR "mtdoops: record_size must be at least 4096 bytes\n");
		return -EINVAL;
	}

	/* Setup the MTD device to use */
	cxt->mtd_index = -1;
	mtd_index = simple_strtoul(mtddev, &endp, 0);
	if (*endp == '\0')
		cxt->mtd_index = mtd_index;
	if (cxt->mtd_index >= MAX_MTD_DEVICES) {
		printk(KERN_ERR "mtdoops: invalid mtd device number (%u) given\n",
				mtd_index);
		return -EINVAL;
	}

	cxt->oops_buf = vmalloc(record_size);
	if (!cxt->oops_buf) {
		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
		return -ENOMEM;
	}
	memset(cxt->oops_buf, 0xff, record_size);

	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_mtd_user(&mtdoops_notifier);
	return 0;
}

static void __exit mtdoops_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	vfree(cxt->oops_buf);
	vfree(cxt->oops_page_used);
}


module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");