/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}

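/*
 * NOMMU version of the page-pinning core: with no paging, any address that
 * lies inside a VMA maps straight to physical memory and can never fault,
 * so this only has to validate the VMA flags and take page references.
 */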
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				get_page(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(current, current->mm, start, nr_pages, flags,
				pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);

long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
			    int write, int force, struct page **pages,
			    int *locked)
{
	return get_user_pages(start, nr_pages, write, force, pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);

long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
			       unsigned long start, unsigned long nr_pages,
			       struct page **pages, unsigned int gup_flags)
{
	long ret;
	down_read(&mm->mmap_sem);
	ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
				NULL, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(__get_user_pages_unlocked);

long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
			     struct page **pages, unsigned int gup_flags)
{
	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
					 pages, gup_flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);

LIST_HEAD(vmap_area_list);

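/*
 * On NOMMU, __vmalloc() below is backed directly by kmalloc(), so freeing
 * a "vmalloc" area is simply a kfree().
 */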
void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

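/*
 * vmalloc_user - allocate zeroed memory intended to be mapped to userspace
 * @size: allocation size
 *
 * The memory is zeroed and the VMA covering it is marked VM_USERMAP so
 * that a later remap_vmalloc_range() will accept it.
 */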
void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

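/*
 * With kmalloc()-backed "vmalloc" memory, kernel virtual and physical
 * addresses coincide, so these lookups are direct conversions.
 */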
struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

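/*
 * vread/vwrite are trivial here: "vmalloc" space is ordinary,
 * directly-addressable memory, so an overflow-checked memcpy() suffices.
 */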
long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

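/*
 * mapping an arbitrary set of pages at a new contiguous virtual address is
 * impossible without an MMU, so vmap() and friends can only BUG()
 */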
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

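/*
 * likewise, a page cannot be inserted into a userspace VMA at some other
 * address without an MMU, so the operation is refused
 */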
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED)
		return -EINVAL;

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		if (file->f_op->mmap_capabilities) {
			capabilities = file->f_op->mmap_capabilities(file);
		} else {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = NOMMU_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					NOMMU_MAP_DIRECT |
					NOMMU_MAP_READ |
					NOMMU_MAP_WRITE;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~NOMMU_MAP_DIRECT;
		if (!(file->f_mode & FMODE_CAN_READ))
			capabilities &= ~NOMMU_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & NOMMU_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~NOMMU_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & NOMMU_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~NOMMU_MAP_DIRECT;
		}

		if (capabilities & NOMMU_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
			    ) {
				capabilities &= ~NOMMU_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					pr_warn("MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & NOMMU_MAP_EXEC)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & NOMMU_MAP_EXEC)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~NOMMU_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = NOMMU_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & NOMMU_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & NOMMU_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	total = 1 << order;
	point = len >> PAGE_SHIFT;

	/* we don't want to allocate a power-of-2 sized page set */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
		total = point;

	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
	if (!base)
		goto enomem;

	atomic_long_add(total, &mmap_pages_allocated);

	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = __vfs_read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap(struct file *file,
			unsigned long addr,
			unsigned long len,
			unsigned long prot,
			unsigned long flags,
			vm_flags_t vm_flags,
			unsigned long pgoff,
			unsigned long *populate)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, result;
	int ret;

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags |= determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & NOMMU_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY)
				vma->vm_flags |= VM_MAPPED_COPY;
			else {
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & NOMMU_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & NOMMU_MAP_COPY))
					goto error_just_free;

				capabilities &= ~NOMMU_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made  */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	pr_warn("Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

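/*
 * the mmap() family of syscalls funnels into do_mmap() above via
 * vm_mmap_pgoff(), which takes mmap_sem for us
 */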
SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

G
H
	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
					current->pid, current->comm,
					start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start)
				return -EINVAL;
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end)
			return -EINVAL;
		if (offset_in_page(start))
			return -EINVAL;
		if (end != vma->vm_end && offset_in_page(end))
			return -EINVAL;
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0)
				return ret;
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}
}

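/*
 * vm_brk() is not supported under NOMMU conditions
 */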
int vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (offset_in_page(addr))
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
1735 1736 1737
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

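/*
 * there are no page tables to walk on NOMMU, so there is never a struct
 * page to hand back for a given user address
 */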
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

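/*
 * remap_pfn_range() can only work on NOMMU if the physical range is
 * already identity-mapped at @addr, hence the check below
 */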
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

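/*
 * map I/O memory over a whole VMA; this just converts the physical start
 * address to a pfn range and defers to io_remap_pfn_range()
 */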
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

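/*
 * mappings never fault on NOMMU, so the pagecache fault handlers should
 * be unreachable and simply BUG()
 */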
int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct fault_env *fe,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

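/*
 * access another process's address space: every NOMMU mapping is directly
 * addressable, so after bounds and permission checks we can copy straight
 * to or from the target address
 */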
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond that so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	i_mmap_lock_read(inode->i_mapping);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			i_mmap_unlock_read(inode->i_mapping);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

D
1945 1946 1947
	up_write(&nommu_region_sem);
	return 0;
}
1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
subsys_initcall(init_user_reserve);

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
subsys_initcall(init_admin_reserve);