#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

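/*
 * Report the mm's memory footprint counters; this backs the
 * VmPeak/VmSize/VmRSS/... block of /proc/<pid>/status.
 */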
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap, ptes, pmds;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

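/* Fill in the page counts reported through /proc/<pid>/statm. */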
unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}

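/*
 * seq_file ->start(): pin the task and mm, take mmap_sem and locate the
 * first VMA for this read.  m->version caches the address at which the
 * walk should resume (see m_cache_vma()), so a continued read can use
 * find_vma() instead of rewinding from the head of the VMA list.
 */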
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

static pid_t pid_of_stack(struct proc_maps_private *priv,
				struct vm_area_struct *vma, bool is_pid)
{
	struct inode *inode = priv->inode;
	struct task_struct *task;
	pid_t ret = 0;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		task = task_of_stack(task, vma, is_pid);
		if (task)
			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
	}
	rcu_read_unlock();

	return ret;
}

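/* Emit one line of /proc/<pid>/maps for @vma. */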
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
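/*
 * Example: with 4K pages, a page shared by three processes contributes
 * (4096 << PSS_SHIFT) / 3 = 5592405 to pss, i.e. ~1365 bytes once shifted
 * back down by PSS_SHIFT.
 */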

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
	u64 swap_pss;
};

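/*
 * Fold one mapped page into the smaps counters.  A page with mapcount >= 2
 * adds size/mapcount to pss (in PSS_SHIFT fixed point) and is accounted as
 * shared; a page mapped only once adds its full size and counts as private.
 */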
static void smaps_account(struct mem_size_stats *mss, struct page *page,
		unsigned long size, bool young, bool dirty)
{
	int mapcount;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || PageReferenced(page))
		mss->referenced += size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		u64 pss_delta;

		if (dirty || PageDirty(page))
			mss->shared_dirty += size;
		else
			mss->shared_clean += size;
		pss_delta = (u64)size << PSS_SHIFT;
		do_div(pss_delta, mapcount);
		mss->pss += pss_delta;
	} else {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
	}
}

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}

	if (!page)
		return;
	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	mss->anonymous_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, HPAGE_PMD_SIZE,
			pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);

	if (vma->vm_flags & VM_SOFTDIRTY)
		vma->vm_flags &= ~VM_SOFTDIRTY;

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}

#else

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

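/*
 * Handler for writes to /proc/<pid>/clear_refs.  The value written selects
 * a clear_refs_types action: clear the referenced bits of all, anonymous
 * or file-mapped pages, clear the soft-dirty bits, or reset the peak RSS.
 */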
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, ~0UL, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

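/* Encode one pte as a pagemap entry: PFN or swap type/offset plus flags. */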
static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
			flags |= PM_SOFT_DIRTY;

		/*
		 * Currently pmd for thp is always present because thp
		 * can not be swapped-out, migrated, or HWPOISONed
		 * (split in such cases instead.)
		 * This if-check is just to prepare for future implementation.
		 */
		if (pmd_present(pmd)) {
			struct page *page = pmd_page(pmd);

			if (page_mapcount(page) == 1)
				flags |= PM_MMAP_EXCLUSIVE;

			flags |= PM_PRESENT;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn && (flags & PM_PRESENT))
				frame++;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
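/*
 * Example (a minimal userspace sketch, not part of this file): reading the
 * pagemap entry for one virtual address of the calling process.
 *
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	uint64_t ent;
 *	off_t off = (vaddr / sysconf(_SC_PAGESIZE)) * sizeof(ent);
 *	if (fd >= 0 && pread(fd, &ent, sizeof(ent), off) == sizeof(ent)) {
 *		int present = (ent >> 63) & 1;           (bit 63, PM_PRESENT)
 *		uint64_t pfn = present ? (ent & ((1ULL << 55) - 1)) : 0;
 *	}
 *	close(fd);
 *
 * The PFN field reads back as zero unless the caller has CAP_SYS_ADMIN;
 * see the pm.show_pfn check below.
 */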
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

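/* Accumulate per-node counters for one mapped page (or nr_pages of a THP). */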
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else {
		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_puts(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */