/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into one, and for
 * that it needs to keep some state when walking, and flush this state
 * when a "break" in the continuity is found.
 */
struct pg_state {
	int level;			/* pagetable level of the current run; 0 = no run started yet */
	pgprot_t current_prot;		/* protection bits shared by every entry in the run */
	unsigned long start_address;	/* virtual address where the current run began */
	unsigned long current_address;	/* virtual address of the entry being examined */
	const struct addr_marker *marker; /* next address-space marker to announce */
};

struct addr_marker {
	unsigned long start_address;
	const char *name;
};

/* Address space markers hints */
static struct addr_marker address_markers[] = {
	{ 0, "User Space" },
#ifdef CONFIG_X86_64
	{ 0x8000000000000000UL, "Kernel Space" },
	{ PAGE_OFFSET,		"Low Kernel Mapping" },
	{ VMALLOC_START,        "vmalloc() Area" },
	{ VMEMMAP_START,        "Vmemmap" },
	{ __START_KERNEL_map,   "High Kernel Mapping" },
	{ MODULES_VADDR,        "Modules" },
	{ MODULES_END,          "End Modules" },
#else
	{ PAGE_OFFSET,          "Kernel Mapping" },
	/*
	 * The following are not compile-time constants on 32-bit;
	 * the real values are patched in by pt_dump_init().
	 */
	{ 0/* VMALLOC_START */, "vmalloc() Area" },
	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
# ifdef CONFIG_HIGHMEM
	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },	/* fixed typo: was "Persisent" */
# endif
	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
#endif
	{ -1, NULL }		/* End of list */
};
/*
 * Multipliers for offsets within the PTEs: the number of bytes of
 * virtual address space covered by one entry at each pagetable level.
 */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)

/*
 * Print a readable form of a pgprot_t to the seq_file.
 *
 * Each attribute is emitted as a fixed-width column (flag name when set,
 * spaces when clear) so that successive lines of the dump align.
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level)
{
	static const char * const level_name[] =
		{ "cr3", "pgd", "pud", "pmd", "pte" };
	pgprotval_t val = pgprot_val(prot);

	if (!val) {
		/* Not present */
		seq_printf(m, "                          ");
	} else {
		seq_printf(m, "%s", (val & _PAGE_USER)   ? "USR " : "    ");
		seq_printf(m, "%s", (val & _PAGE_RW)     ? "RW "  : "ro ");
		seq_printf(m, "%s", (val & _PAGE_PWT)    ? "PWT " : "    ");
		seq_printf(m, "%s", (val & _PAGE_PCD)    ? "PCD " : "    ");

		/* Bit 9 has a different meaning on level 3 vs 4 */
		if (level <= 3)
			seq_printf(m, "%s", (val & _PAGE_PSE) ? "PSE " : "    ");
		else
			seq_printf(m, "%s", (val & _PAGE_PAT) ? "pat " : "    ");

		seq_printf(m, "%s", (val & _PAGE_GLOBAL) ? "GLB " : "    ");
		seq_printf(m, "%s", (val & _PAGE_NX)     ? "NX "  : "x  ");
	}

	/* Terminate the line with the name of the level just printed. */
	seq_printf(m, "%s\n", level_name[level]);
}

/*
 * On 64 bits, sign-extend the 48 bit address to 64 bit
 */
static unsigned long normalize_addr(unsigned long u)
{
#ifdef CONFIG_X86_64
	/*
	 * Shift the address up so bit 47 becomes the sign bit, then shift
	 * back down as a signed quantity to replicate it through the top
	 * 16 bits (canonical-address form).
	 */
	signed long extended = (signed long)(u << 16);

	return (unsigned long)(extended >> 16);
#else
	/* 32-bit addresses need no canonicalization. */
	return u;
#endif
}

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
		      pgprot_t new_prot, int level)
{
	pgprotval_t prot, cur;
	static const char units[] = "KMGTPE";

	/*
	 * If we have a "break" in the series, we need to flush the state that
	 * we have now. "break" is either changing perms, levels or
	 * address space marker.
	 */
	/* Mask off the frame number; only the protection flags matter here. */
	prot = pgprot_val(new_prot) & ~(PTE_MASK);
	cur = pgprot_val(st->current_prot) & ~(PTE_MASK);

	if (!st->level) {
		/* First entry */
		st->current_prot = new_prot;
		st->level = level;
		st->marker = address_markers;
		seq_printf(m, "---[ %s ]---\n", st->marker->name);
	} else if (prot != cur || level != st->level ||
		   st->current_address >= st->marker[1].start_address) {
		const char *unit = units;
		unsigned long delta;

		/*
		 * Now print the actual finished series
		 */
		seq_printf(m, "0x%p-0x%p   ",
			   (void *)st->start_address,
			   (void *)st->current_address);

		/*
		 * Size of the run in KB; scale up through M/G/T/... while it
		 * divides evenly by 1024 and a larger unit exists.
		 */
		delta = (st->current_address - st->start_address) >> 10;
		while (!(delta & 1023) && unit[1]) {
			delta >>= 10;
			unit++;
		}
		seq_printf(m, "%9lu%c ", delta, *unit);
		printk_prot(m, st->current_prot, st->level);

		/*
		 * We print markers for special areas of address space,
		 * such as the start of vmalloc space etc.
		 * This helps in the interpretation.
		 */
		if (st->current_address >= st->marker[1].start_address) {
			st->marker++;
			seq_printf(m, "---[ %s ]---\n", st->marker->name);
		}

		/* Begin a new run at the current entry. */
		st->start_address = st->current_address;
		st->current_prot = new_prot;
		st->level = level;
	}
}

/*
 * Dump every PTE under the PMD entry @addr; @P is the virtual address
 * mapped by the first PTE.
 */
static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
							unsigned long P)
{
	pte_t *pte = (pte_t *) pmd_page_vaddr(addr);
	int idx;

	for (idx = 0; idx < PTRS_PER_PTE; idx++, pte++) {
		st->current_address = normalize_addr(P + idx * PTE_LEVEL_MULT);
		note_page(m, st, pte_pgprot(*pte), 4);
	}
}

#if PTRS_PER_PMD > 1
213

214
static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
215 216 217 218 219 220 221
							unsigned long P)
{
	int i;
	pmd_t *start;

	start = (pmd_t *) pud_page_vaddr(addr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
222
		st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
223
		if (!pmd_none(*start)) {
224
			pgprotval_t prot = pmd_val(*start) & ~PTE_MASK;
225

226
			if (pmd_large(*start) || !pmd_present(*start))
227 228
				note_page(m, st, __pgprot(prot), 3);
			else
229 230
				walk_pte_level(m, st, *start,
					       P + i * PMD_LEVEL_MULT);
231 232 233 234 235 236
		} else
			note_page(m, st, __pgprot(0), 3);
		start++;
	}
}

237 238 239 240 241
#else
#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
246 247 248 249 250 251 252 253
							unsigned long P)
{
	int i;
	pud_t *start;

	start = (pud_t *) pgd_page_vaddr(addr);

	for (i = 0; i < PTRS_PER_PUD; i++) {
254
		st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
255
		if (!pud_none(*start)) {
256
			pgprotval_t prot = pud_val(*start) & ~PTE_MASK;
257

258
			if (pud_large(*start) || !pud_present(*start))
259 260
				note_page(m, st, __pgprot(prot), 2);
			else
261 262
				walk_pmd_level(m, st, *start,
					       P + i * PUD_LEVEL_MULT);
263 264 265 266 267 268 269
		} else
			note_page(m, st, __pgprot(0), 2);

		start++;
	}
}

270 271 272 273 274 275 276
#else
#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(pgd_val(a)),p)
#define pgd_large(a) pud_large(__pud(pgd_val(a)))
#define pgd_none(a)  pud_none(__pud(pgd_val(a)))
#endif

/*
 * Top of the walk: iterate the kernel PGD and feed every entry into
 * note_page(), which coalesces runs of identical protections.
 */
static void walk_pgd_level(struct seq_file *m)
{
#ifdef CONFIG_X86_64
	pgd_t *start = (pgd_t *) &init_level4_pgt;
#else
	pgd_t *start = swapper_pg_dir;
#endif
	int i;
	struct pg_state st;

	/* Zeroed state: st.level == 0 makes note_page() treat the first
	 * entry as the start of the dump. */
	memset(&st, 0, sizeof(st));

	for (i = 0; i < PTRS_PER_PGD; i++) {
		st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
		if (!pgd_none(*start)) {
			/* Keep only the flag bits, not the frame number. */
			pgprotval_t prot = pgd_val(*start) & ~PTE_MASK;

			if (pgd_large(*start) || !pgd_present(*start))
				note_page(m, &st, __pgprot(prot), 1);
			else
				walk_pud_level(m, &st, *start,
					       i * PGD_LEVEL_MULT);
		} else
			note_page(m, &st, __pgprot(0), 1);

		start++;
	}

	/* Flush out the last page */
	st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT);
	note_page(m, &st, __pgprot(0), 0);
}

/* seq_file show callback: produce the entire dump in a single pass. */
static int ptdump_show(struct seq_file *m, void *v)
{
	walk_pgd_level(m);
	return 0;
}

/* Open handler: single_open() since the dump has no record iteration. */
static int ptdump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, ptdump_show, NULL);
}

/* File operations for the "kernel_page_tables" debugfs entry. */
static const struct file_operations ptdump_fops = {
	.open		= ptdump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Module/boot init: patch the runtime-only marker addresses on 32-bit,
 * then create the debugfs file. Returns 0 on success, -ENOMEM if the
 * debugfs entry could not be created.
 */
static int pt_dump_init(void)
{
	struct dentry *pe;

#ifdef CONFIG_X86_32
	/* Not a compile-time constant on x86-32 */
	address_markers[2].start_address = VMALLOC_START;
	address_markers[3].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
	address_markers[4].start_address = PKMAP_BASE;
	address_markers[5].start_address = FIXADDR_START;
# else
	/* Without HIGHMEM the pkmap slot is absent; fixmap moves up. */
	address_markers[4].start_address = FIXADDR_START;
# endif
#endif

	pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
				 &ptdump_fops);
	if (!pe)
		return -ENOMEM;

	return 0;
}

/* Register at boot (device-level initcall) and expose module metadata. */
__initcall(pt_dump_init);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");