#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
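
/*
 * The block above appears in /proc/PID/status; an illustrative
 * excerpt (values made up):
 *
 *	VmPeak:	    8584 kB
 *	VmSize:	    8452 kB
 *	VmHWM:	    1528 kB
 *	VmRSS:	    1316 kB
 *	VmSwap:	       0 kB
 */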

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}
static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	int flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->brk &&
						vma->vm_end >= mm->start_brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}
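
/*
 * An illustrative /proc/PID/maps line as printed above (values
 * made up):
 *
 *	00400000-0040b000 r-xp 00000000 08:01 157625     /bin/cat
 *
 * i.e. start-end, permissions ('s' shared vs 'p' private), file
 * offset, device major:minor, inode, then the pathname or a
 * [heap]/[stack]/[vdso] marker, padded by pad_len_spaces().
 */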

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
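
/*
 * Worked example (illustrative): with 4K pages, a page shared by three
 * processes adds (4096 << PSS_SHIFT) / 3 = 5592405 to pss; read back as
 * bytes, 5592405 >> PSS_SHIFT = 1365, about a third of the page, with
 * the remainder kept in the low PSS_SHIFT bits so it still accumulates
 * across pages.
 */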

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
};


static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;
	int mapcount;

	if (is_swap_pte(ptent)) {
		mss->swap += ptent_size;
		return;
	}

	if (!pte_present(ptent))
		return;

	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	spin_lock(&walk->mm->page_table_lock);
	if (pmd_trans_huge(*pmd)) {
		if (pmd_trans_splitting(*pmd)) {
			spin_unlock(&walk->mm->page_table_lock);
			wait_split_huge_page(vma->anon_vma, pmd);
		} else {
			smaps_pte_entry(*(pte_t *)pmd, addr,
					HPAGE_PMD_SIZE, walk);
			spin_unlock(&walk->mm->page_table_lock);
			mss->anonymous_thp += HPAGE_PMD_SIZE;
			return 0;
		}
	} else {
		spin_unlock(&walk->mm->page_table_lock);
	}
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
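
/*
 * Illustrative usage: "echo 1 > /proc/PID/clear_refs" resets the
 * referenced bits on every mapping, so the Referenced counters in
 * /proc/PID/smaps afterwards reflect only memory touched since the
 * write; 2 restricts the clearing to anonymous vmas and 3 to
 * file-backed ones, as checked in clear_refs_write() below.
 */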

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	long type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (strict_strtol(strstrip(buffer), 10, &type))
		return -EINVAL;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * Anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

struct pagemapread {
	int pos, len;
	u64 *buffer;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
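
/*
 * Encoding example (illustrative): a present 4K page at pfn 0x1234 is
 * PM_PFRAME(0x1234) | PM_PSHIFT(12) | PM_PRESENT = 0x8600000000001234,
 * i.e. status 100b in bits 61-63, page shift 12 in bits 55-60 and the
 * pfn in the low 55 bits.
 */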
#define PM_END_OF_BUFFER    1

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = pfn;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	split_huge_page_pmd(walk->mm, pmd);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	u64 pfn;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		pfn = huge_pte_to_pagemap_entry(*pte, offset);
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
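
/*
 * Illustrative userspace sketch (not part of this file), assuming a
 * 4K page size:
 *
 *	uint64_t entry;
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	pread(fd, &entry, 8, (vaddr / 4096) * 8);
 *	if (entry & (1ULL << 63))			// PM_PRESENT
 *		pfn = entry & ((1ULL << 55) - 1);	// PM_PFRAME_MASK
 *
 * Bits 55-60 of the entry give the page shift if 4K is not the right
 * assumption.
 */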
#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	mm = mm_for_maps(task);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;

	if (!count)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty)
{
	int count = page_mapcount(page);

	md->pages++;
	if (pte_dirty || PageDirty(page))
		md->dirty++;

	if (PageSwapCache(page))
		md->swapcache++;

	if (PageActive(page) || PageUnevictable(page))
		md->active++;

	if (PageWriteback(page))
		md->writeback++;

	if (PageAnon(page))
		md->anon++;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)]++;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page;
		int nid;

		if (!pte_present(*pte))
			continue;

		page = vm_normal_page(md->vma, addr, *pte);
		if (!page)
			continue;

		if (PageReserved(page))
			continue;

		nid = page_to_nid(page);
		if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
			continue;

		gather_stats(page, md, pte_dirty(*pte));

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte));
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
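/*
 * An illustrative numa_maps line (values made up):
 *
 *	00400000 default file=/bin/cat mapped=2 active=1 N0=2
 *
 * i.e. start address and policy string, then only those counters that
 * are non-zero for the vma.
 */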
static int show_numa_map(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct numa_maps *md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	md = kzalloc(sizeof(struct numa_maps), GFP_KERNEL);
	if (!md)
		return 0;

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetlb_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else if (vma->vm_start <= mm->start_stack &&
			vma->vm_end >= mm->start_stack) {
		seq_printf(m, " stack");
	}

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');
	kfree(md);

	if (m->count < m->size)
		m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */