kcore.c 15.5 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

13
#include <linux/crash_core.h>
L
Linus Torvalds 已提交
14 15
#include <linux/mm.h>
#include <linux/proc_fs.h>
16
#include <linux/kcore.h>
L
Linus Torvalds 已提交
17
#include <linux/user.h>
18
#include <linux/capability.h>
L
Linus Torvalds 已提交
19 20
#include <linux/elf.h>
#include <linux/elfcore.h>
21
#include <linux/notifier.h>
L
Linus Torvalds 已提交
22 23
#include <linux/vmalloc.h>
#include <linux/highmem.h>
A
Andrew Morton 已提交
24
#include <linux/printk.h>
M
Mike Rapoport 已提交
25
#include <linux/memblock.h>
L
Linus Torvalds 已提交
26
#include <linux/init.h>
27
#include <linux/slab.h>
28
#include <linux/uaccess.h>
L
Linus Torvalds 已提交
29
#include <asm/io.h>
30
#include <linux/list.h>
31 32
#include <linux/ioport.h>
#include <linux/memory.h>
33
#include <linux/sched/task.h>
D
David Howells 已提交
34
#include <linux/security.h>
35
#include <asm/sections.h>
36
#include "internal.h"
L
Linus Torvalds 已提交
37

38
#define CORE_STR "CORE"
L
Linus Torvalds 已提交
39

40 41 42 43
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

44 45
static struct proc_dir_entry *proc_root_kcore;

L
Linus Torvalds 已提交
46 47 48 49 50 51 52 53

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

54
static LIST_HEAD(kclist_head);
55
static DECLARE_RWSEM(kclist_lock);
56
static int kcore_need_update = 1;
L
Linus Torvalds 已提交
57

58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

/*
 * Install an architecture/driver-specific "is this pfn RAM?" predicate.
 * Only a single callback may ever be registered; later callers get -EBUSY.
 */
int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram != NULL)
		return -EBUSY;

	mem_pfn_is_ram = fn;
	return 0;
}

/* Query the registered predicate; without one, treat every pfn as RAM. */
static int pfn_is_ram(unsigned long pfn)
{
	return mem_pfn_is_ram ? mem_pfn_is_ram(pfn) : 1;
}

80 81 82
/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	/* Fill in the entry, then append it to the global segment list. */
	new->type = type;
	new->size = size;
	new->addr = (unsigned long)addr;

	list_add_tail(&new->list, &kclist_head);
}

91 92
/*
 * Compute the layout of the virtual /proc/kcore file.
 *
 * Outputs:
 *   *nphdr       - number of program headers: one PT_NOTE plus one PT_LOAD
 *                  per entry on kclist_head.
 *   *phdrs_len   - total byte size of the program header table.
 *   *notes_len   - worst-case byte size of the PT_NOTE segment (four notes:
 *                  PRSTATUS, PRPSINFO, TASKSTRUCT, VMCOREINFO).
 *   *data_offset - page-aligned file offset where segment data begins.
 *
 * Returns the total file size: data_offset plus the largest segment-end
 * offset found on the list.  Caller must hold kclist_lock.
 */
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	/* The file must extend far enough to cover every registered segment. */
	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	/*
	 * Note segment budget: 4 note headers, 3 "CORE" name strings, the
	 * vmcoreinfo note name, and the 4-byte-aligned descriptors for each
	 * note read_kcore() will emit.
	 */
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}

120 121 122 123 124 125
#ifdef CONFIG_HIGHMEM
/*
 * If no highmem, we can assume [0...max_low_pfn) continuous range of memory
 * because memory hole is not as big as !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ from the kernel.)
 */
126
static int kcore_ram_list(struct list_head *head)
127 128 129 130 131 132 133 134 135
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
136 137
	list_add(&ent->list, head);
	return 0;
138 139 140 141
}

#else /* !CONFIG_HIGHMEM */

142 143
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
/*
 * Register the vmemmap region (the struct page array) backing the RAM
 * segment @ent as a KCORE_VMEMMAP entry on @head, so that it is dumped too.
 * Returns 1 on success (including "nothing to add"), 0 only if the kmalloc
 * for the new entry fails.
 */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;


	/* Page-align the struct-page range covering [pfn, pfn + nr_pages). */
	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (needed because we had to page-align the range) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		/* Trim our end back so we never overlap an existing entry. */
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;

}
#else
177 178
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	/* No vmemmap without SPARSEMEM_VMEMMAP; nothing extra to register. */
	return 1;
}

#endif

185 186 187 188 189
/*
 * walk_system_ram_range() callback: turn one System RAM range into a
 * KCORE_RAM entry on the list passed via @arg, plus its vmemmap entry.
 *
 * Return values follow the walk convention: 0 keeps the entry, a positive
 * value skips this range and continues the walk, -ENOMEM aborts it.
 */
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	/* Range must be in the direct map to be readable through kcore. */
	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* cut not-mapped area. ....from ppc-32 code. */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid so we know this address
	 * is a valid pointer, therefore we can check against it to determine
	 * if we need to trim
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	/* Also register the vmemmap backing this range; undo on failure. */
	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

234
/*
 * Build the RAM segment list (!HIGHMEM variant): walk every System RAM
 * range up to the highest pfn of any memory node and let
 * kclist_add_private() create the entries on @list.
 */
static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/*
 * Refresh the RAM/vmemmap portion of the kcore segment list after memory
 * hotplug (or at first open).  Builds a fresh list, swaps it in under
 * kclist_lock, and frees the replaced entries only after dropping the lock.
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	/* xchg makes exactly one updater win even against the notifier. */
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	/* Retire the old RAM/vmemmap entries; other types are kept. */
	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	/* Free outside the lock; kfree may sleep paths we don't want held. */
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}
L
Linus Torvalds 已提交
294

295 296 297
static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
L
Linus Torvalds 已提交
298
{
299 300 301 302 303 304 305 306 307 308 309
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}
L
Linus Torvalds 已提交
310 311 312 313

/*
 * read() handler for /proc/kcore.
 *
 * The virtual file looks like an ELF core dump:
 *   [elfhdr][program headers][note segment][pad to page][segment data...]
 * The header/phdr/note parts are synthesized on every read from the current
 * kclist; the data part is served from kernel memory at the virtual address
 * corresponding to the file offset (see kc_offset_to_vaddr()).
 *
 * @file:   holds the PAGE_SIZE bounce buffer allocated in open_kcore()
 * @buffer: user destination
 * @buflen: requested byte count
 * @fpos:   file position, advanced as data is produced
 *
 * Returns bytes produced, or a negative errno (-EFAULT/-ENOMEM).
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
			ret = -EFAULT;
			goto out;
		}

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		/* phdr[0] is the PT_NOTE header; the rest are PT_LOADs. */
		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
				 tsz)) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strlcpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	/* First chunk is trimmed so subsequent chunks are page-aligned. */
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			list_for_each_entry(m, &kclist_head, list) {
				if (start >= m->addr &&
				    start < m->addr + m->size)
					break;
			}
		}

		if (&m->list == &kclist_head) {
			/* Hole between segments: present it as zeroes. */
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			m = NULL;	/* skip the list anchor */
		} else if (!pfn_is_ram(__pa(start) >> PAGE_SHIFT)) {
			/* Non-RAM page (e.g. ballooned out): zero-fill. */
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else if (m->type == KCORE_VMALLOC) {
			vread(buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, buf, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else if (m->type == KCORE_USER) {
			/* User page is handled prior to normal kernel page: */
			if (copy_to_user(buffer, (char *)start, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		} else {
			if (kern_addr_valid(start)) {
				/*
				 * Using bounce buffer to bypass the
				 * hardened user copy kernel text checks.
				 */
				if (copy_from_kernel_nofault(buf, (void *)start,
						tsz)) {
					if (clear_user(buffer, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				} else {
					if (copy_to_user(buffer, buf, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				}
			} else {
				/* Unmapped kernel address: zero-fill. */
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}
542

543 544
/*
 * open() handler for /proc/kcore: gate access, allocate the per-open
 * bounce buffer used by read_kcore(), and refresh the segment list and
 * inode size if memory hotplug invalidated them.
 */
static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	/* Capability check first so unprivileged callers always see EPERM. */
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	/* Bounce buffer for vread()/copy_from_kernel_nofault() in reads. */
	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

567 568 569 570 571
/* Release hook: free the bounce buffer allocated in open_kcore(). */
static int release_kcore(struct inode *inode, struct file *file)
{
	void *bounce = file->private_data;

	kfree(bounce);
	return 0;
}
572

573 574 575 576 577
/* File operations backing the /proc/kcore entry. */
static const struct proc_ops kcore_proc_ops = {
	.proc_read	= read_kcore,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	/*
	 * Memory coming online or going offline invalidates the cached
	 * RAM list; the actual rebuild is deferred to the next open().
	 */
	if (action == MEM_ONLINE || action == MEM_OFFLINE)
		kcore_need_update = 1;

	return NOTIFY_OK;
}

593 594 595 596
/* Hotplug notifier block, registered at init by proc_kcore_init(). */
static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};
597

598 599
static struct kcore_list kcore_vmalloc;

600 601 602 603 604 605 606 607
#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, special segment is used for mapping kernel text instead of
 * direct-map area. We need to create special TEXT section.
 */
static void __init proc_kcore_text_init(void)
{
	/* Cover the kernel image [_text, _end) as a KCORE_TEXT segment. */
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
	/* Kernel text is reachable via the direct map; nothing extra to add. */
}
#endif

616 617 618 619
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	/* Only add a segment when modules live outside the vmalloc range. */
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

634 635
/*
 * Create /proc/kcore (root-readable only), populate the initial segment
 * list (text, vmalloc, modules, direct-mapped RAM), and register for
 * memory-hotplug events so the RAM list can be rebuilt on demand.
 */
static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);