#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? Not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
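
/*
 * Illustrative only (values invented): the block printed above shows
 * up in /proc/PID/status as, e.g.,
 *
 *	VmPeak:	    8560 kB
 *	VmSize:	    8524 kB
 *	VmRSS:	    1148 kB
 *	VmSwap:	       0 kB
 */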

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time. We have zero last_addr at
	 * the beginning and also after lseek. We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_for_maps(priv->task);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
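
/*
 * Illustrative only (path and numbers invented): with the format
 * above, a typical /proc/PID/maps line comes out as
 *
 *	08048000-08056000 r-xp 00000000 03:0c 64593    /usr/bin/foo
 */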

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
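
/*
 * Worked example of the fixed point (4K pages): a page with
 * mapcount 3 adds (4096 << PSS_SHIFT) / 3 = 5592405 to each
 * mapper's pss.  Summed over the three mappers that is 16777215,
 * and 16777215 >> PSS_SHIFT = 4095, so the page's 4096 bytes are
 * accounted to within one byte in total.
 */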

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	struct page *page;
	int mapcount;

	if (is_swap_pte(ptent)) {
		mss->swap += ptent_size;
		return;
	}

	if (!pte_present(ptent))
		return;

	page = vm_normal_page(vma, addr, ptent);
	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
573 574 575 576 577 578
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(walk->mm, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * Anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};
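
/*
 * Usage sketch (userspace, illustrative only): clear the referenced
 * bits, let the task run for a while, then read /proc/PID/smaps and
 * look at the "Referenced:" lines to see what was touched in the
 * interval:
 *
 *	int fd = open("/proc/1234/clear_refs", O_WRONLY);
 *	write(fd, "1", 1);	   (1 = all, 2 = anon, 3 = file-mapped)
 *	close(fd);
 */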

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}

static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte)
{
	if (is_swap_pte(pte))
		*pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP);
	else if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte))
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
					pmd_t pmd, int offset)
{
	/*
	 * Currently the pmd for a thp is always present, because a thp
	 * cannot be swapped out, migrated, or HWPOISONed (it is split in
	 * such cases instead).  This if-check just prepares for a future
	 * implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
						pmd_t pmd, int offset)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
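/*
 * Decoding sketch (userspace, illustrative only; the names below are
 * local to the example).  Each 64-bit entry describes one virtual
 * page, so the entry for vaddr lives at file offset
 * (vaddr / pagesize) * 8:
 *
 *	uint64_t ent;
 *	pread(fd, &ent, 8, (vaddr / pagesize) * 8);
 *	if (ent & (1ULL << 63))			   page present
 *		pfn = ent & ((1ULL << 55) - 1);
 *	else if (ent & (1ULL << 62))		   page swapped
 *		swp_type = ent & 0x1f;
 */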
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_for_maps(task);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
973
	}
974 975 976 977
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

978 979
out_mm:
	mmput(mm);
980 981
out_free:
	kfree(pm.buffer);
982 983 984 985 986 987 988 989 990 991
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_HIGH_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
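/*
 * Illustrative only (all values invented): a line produced below
 * might look like
 *
 *	7f0e32400000 default file=/lib/libfoo.so mapped=12 mapmax=3 N0=10 N1=2
 */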
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetlb_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(proc_priv->task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol, 0);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177
	} else {
		pid_t tid = vm_is_stack(proc_priv->task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_HIGH_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */