#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
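
/*
 * Illustrative rendering of the fields printed above into
 * /proc/<pid>/status (all values are made up for the example):
 *
 *	VmPeak:	    8192 kB
 *	VmSize:	    8064 kB
 *	VmHWM:	    1024 kB
 *	VmRSS:	     980 kB
 *	VmSwap:	       0 kB
 */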

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
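
/*
 * Worked example (values illustrative): one 4 KiB page shared by
 * three tasks accumulates (4096 << PSS_SHIFT) / 3 = 5592405 into
 * pss; reporting it as pss >> (10 + PSS_SHIFT) yields 1 kB, the
 * true ~1.33 kB share rounded down.
 */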

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);
	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
				mss.nonlinear >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}
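
/*
 * Example of one resulting /proc/<pid>/smaps entry (the first line
 * comes from show_map_vma(); all numbers are illustrative):
 *
 *	00400000-00421000 r-xp 00000000 08:01 131        /bin/cat
 *	Size:                132 kB
 *	Rss:                  12 kB
 *	Pss:                  12 kB
 *	Swap:                  0 kB
 *	Locked:                0 kB
 */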

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
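
/*
 * Typical usage (illustrative): estimate a task's working set by
 * clearing the referenced bits, letting it run, and re-reading the
 * Referenced: lines that show_smap() emits:
 *
 *	echo 1 > /proc/<pid>/clear_refs
 *	sleep 10
 *	grep Referenced /proc/<pid>/smaps
 */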

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * Anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		*pme = make_pme(PM_NOT_PRESENT);
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;

	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
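
/*
 * Minimal userspace sketch of that lookup (not part of this file;
 * "vaddr" is a hypothetical input and a 4096-byte page is assumed):
 *
 *	uint64_t ent;
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	if (pread(fd, &ent, 8, (vaddr / 4096) * 8) == 8 &&
 *	    (ent & (1ULL << 63)))			(bit 63: present)
 *		pfn = ent & ((1ULL << 55) - 1);		(bits 0-54: PFN)
 */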
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_access(task, PTRACE_MODE_READ);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct task_struct *task = proc_priv->task;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	task_lock(task);
	pol = get_vma_policy(task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);
	task_unlock(task);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
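
/*
 * Example line as rendered above in /proc/<pid>/numa_maps (values
 * illustrative):
 *
 *	00400000 default file=/bin/cat mapped=9 active=4 N0=9
 */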
static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */