// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/pstore_ram.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * This header lives at the very start of the backing RAM region and is
 * expected to survive reboots; @data holds the circular log contents.
 *
 * @sig:
 *	signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start:
 *	offset into @data where the beginning of the stored bytes begin
 * @size:
 *	number of valid bytes stored in @data
 */
struct persistent_ram_buffer {
	uint32_t    sig;
	atomic_t    start;
	atomic_t    size;
	uint8_t     data[];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

/* Number of valid bytes currently stored in the circular buffer. */
static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

/* Offset into the data area where the next write will land. */
static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	/*
	 * PRZ_FLAG_NO_LOCK is set by callers that provide their own
	 * serialization (or must not take locks in their context).
	 */
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	/* Wrap back into [0, buffer_size); callers clamp 'a' to buffer_size. */
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	/* See buffer_start_add(): PRZ_FLAG_NO_LOCK callers serialize themselves. */
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	/* Once the buffer has filled completely, size stays pinned at max. */
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

/*
 * Compute Reed-Solomon parity over @len bytes of @data and store the
 * resulting ecc_size parity symbols into @ecc, one byte per symbol.
 */
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Initialize the parity buffer */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
	/* Narrow each parity word to a byte (default symsize is 8 bits). */
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = prz->ecc_info.par[i];
}

/*
 * Check @len bytes of @data against the stored parity @ecc, correcting
 * in place where possible.  Returns the number of corrected symbols, or
 * a negative value if the block is uncorrectable.
 */
static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Widen the stored parity bytes into the codec workspace. */
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		prz->ecc_info.par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
				NULL, 0, NULL, 0, NULL);
}

/*
 * Recompute the ECC parity for every ECC block touched by the byte range
 * [@start, @start + @count) of the data area.
 */
static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	/*
	 * Round @start down to its ECC block.  NOTE(review): the mask only
	 * rounds down correctly when block_size is a power of two — the
	 * default (128) is, but this is not enforced at init time.
	 */
	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		/* The final block may be short if buffer_size is not a multiple. */
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}

/* Refresh the parity record covering the header (sig/start/size) itself. */
static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}

/*
 * Run ECC over the entire valid data area, correcting what can be
 * corrected and accumulating statistics into corrected_bytes/bad_blocks.
 */
static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;
		/* The final block may be shorter than block_size. */
		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}

/*
 * Set up Reed-Solomon ECC for the zone: carve the parity area out of the
 * end of the buffer, build the RS codec, allocate the parity workspace,
 * and verify/repair the persistent header.  Returns 0 on success
 * (including when ECC is disabled) or a negative errno.
 */
static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	/* Fall back to the historical defaults for any unset parameters. */
	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	/* One parity record per data block, plus one for the header. */
	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	/* Shrink the data area; parity lives at its (new) end. */
	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
					  sizeof(*prz->ecc_info.par),
					  GFP_KERNEL);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	/* Verify (and, if possible, repair) the persistent header itself. */
	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info_ratelimited("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}

/*
 * Format a human-readable ECC status summary into @str (at most @len
 * bytes).  Returns the snprintf() result, or 0 when ECC is disabled.
 */
ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	if (!prz->ecc_info.ecc_size)
		return 0;

	if (!prz->corrected_bytes && !prz->bad_blocks)
		return snprintf(str, len, "\nNo errors detected\n");

	return snprintf(str, len,
			"\n%d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
}

/*
 * Copy @count bytes from @s into the data area at offset @start (no wrap
 * handling here; callers split wrapped writes) and refresh the ECC
 * blocks covering the written range.
 */
static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}

/*
 * Like persistent_ram_update() but sourced from userspace.  Returns 0 or
 * -EFAULT if the copy faulted; ECC is refreshed over the target range
 * either way so parity stays consistent with whatever landed in RAM.
 */
static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ?
		-EFAULT : 0;
	persistent_ram_update_ecc(prz, start, count);
	return ret;
}

/*
 * Snapshot the current (possibly wrapped) buffer contents into a
 * linearized kernel allocation (prz->old_log) so that they survive a
 * later persistent_ram_zap().  Best-effort: allocation failure only logs.
 */
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	if (!prz->old_log) {
		/* Repair any ECC-correctable damage before copying out. */
		persistent_ram_ecc_old(prz);
		prz->old_log = kmalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	/* Unwrap: oldest bytes run from 'start' to the end, then from 0. */
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

/*
 * Append @count bytes from kernel buffer @s to the circular buffer.  If
 * @count exceeds the buffer size, only the trailing buffer_size bytes
 * are kept.  Returns the original @count.
 */
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		/* Keep only the newest bytes that actually fit. */
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		/* Write wraps: tail segment first, then restart at offset 0. */
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	/* Header (start/size) changed, so its parity must be refreshed too. */
	persistent_ram_update_header_ecc(prz);

	return count;
}

/*
 * Userspace-sourced variant of persistent_ram_write().  Returns @count
 * on success or -EFAULT if any copy_from_user() failed.
 */
int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		/* Keep only the newest bytes that actually fit. */
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		/* Write wraps: tail segment first, then restart at offset 0. */
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	/* Skip the second copy if the first segment already faulted. */
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}

/* Size in bytes of the snapshot taken by persistent_ram_save_old(). */
size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

/* Linearized snapshot of the previous contents, or NULL if none saved. */
void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

/* Discard the saved snapshot of the previous buffer contents. */
void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}

/* Empty the live buffer and re-seal the header with fresh ECC parity. */
void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}

399 400
static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
401
{
402 403 404 405 406
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
407
	void *vaddr;
408 409 410 411

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

412 413 414 415
	if (memtype)
		prot = pgprot_noncached(PAGE_KERNEL);
	else
		prot = pgprot_writecombine(PAGE_KERNEL);
416

417
	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
418
	if (!pages) {
419 420
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
421
		return NULL;
422 423 424 425 426 427
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
428
	vaddr = vmap(pages, page_count, VM_MAP, prot);
429
	kfree(pages);
430

431 432 433 434 435 436
	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
437 438
}

439
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
440
		unsigned int memtype, char *label)
441
{
442 443
	void *va;

444
	if (!request_mem_region(start, size, label ?: "ramoops")) {
445 446
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
			label ?: "ramoops",
447 448 449 450
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

451 452 453 454 455
	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

456 457 458 459 460
	/*
	 * Since request_mem_region() and ioremap() are byte-granularity
	 * there is no need handle anything special like we do when the
	 * vmap() case in persistent_ram_vmap() above.
	 */
461
	return va;
462 463
}

/*
 * Map the physical range into the kernel and point prz->buffer at it.
 * RAM backed by struct pages is vmap()ed; anything else is ioremapped.
 * Returns 0, or -ENOMEM on mapping failure.
 */
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr;
	/*
	 * NOTE(review): no guard against size < sizeof(*prz->buffer) here;
	 * such a size would underflow buffer_size.  Presumably callers
	 * always pass a region at least one header large — TODO confirm.
	 */
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}

/*
 * Validate the persistent header after mapping: initialize ECC, check
 * the signature, preserve any valid prior contents, and reset the buffer
 * when it is missing, invalid, or flagged single-use (PRZ_FLAG_ZAP_OLD).
 */
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;
	bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret) {
		pr_warn("ECC failed %s\n", prz->label);
		return ret;
	}

	/* Each zone type mixes its own value into the base signature. */
	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) == 0) {
			pr_debug("found existing empty buffer\n");
			return 0;
		}

		/* Counters survived in untrusted RAM — sanity-check them. */
		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz)) {
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
			zap = true;
		} else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		zap = true;
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (zap)
		persistent_ram_zap(prz);

	return 0;
}

/*
 * Tear down a zone: unmap (and, for iomapped regions, release) the
 * backing memory, free the RS codec and parity workspace, and free the
 * zone itself.  Safe on a NULL or partially-initialized zone.
 */
void persistent_ram_free(struct persistent_ram_zone *prz)
{
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			/* We must vunmap() at page-granularity. */
			vunmap(prz->vaddr - offset_in_page(prz->paddr));
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	if (prz->rs_decoder) {
		free_rs(prz->rs_decoder);
		prz->rs_decoder = NULL;
	}
	kfree(prz->ecc_info.par);
	prz->ecc_info.par = NULL;

	persistent_ram_free_old(prz);
	kfree(prz->label);
	kfree(prz);
}

559
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
560
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
561
			unsigned int memtype, u32 flags, char *label)
562 563 564 565 566 567
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
568
		pr_err("failed to allocate persistent ram zone\n");
569 570 571
		goto err;
	}

572
	/* Initialize general buffer state. */
573
	raw_spin_lock_init(&prz->buffer_lock);
574
	prz->flags = flags;
575
	prz->label = kstrdup(label, GFP_KERNEL);
576

577
	ret = persistent_ram_buffer_map(start, size, prz, memtype);
578 579 580
	if (ret)
		goto err;

581
	ret = persistent_ram_post_init(prz, sig, ecc_info);
582 583
	if (ret)
		goto err;
584

585 586 587 588 589 590
	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		prz->label, prz->size, (unsigned long long)prz->paddr,
		sizeof(*prz->buffer), prz->buffer_size,
		prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		prz->ecc_info.ecc_size, prz->ecc_info.block_size);

591 592
	return prz;
err:
593
	persistent_ram_free(prz);
594 595
	return ERR_PTR(ret);
}