/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
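
/*
 * Illustrative sketch (not part of the original file): because kobjsize()
 * reports the backing allocation, a kmalloc() pointer may yield more than
 * was asked for -- e.g. a 100-byte request backed by a 128-byte slab object.
 */
#if 0
static void kobjsize_example(void)
{
	void *p = kmalloc(100, GFP_KERNEL);

	if (p) {
		/* reports the slab object size (e.g. 128), not 100 */
		printk(KERN_DEBUG "kobjsize = %u\n", kobjsize(p));
		kfree(p);
	}
}
#endif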

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int nr_pages, unsigned int foll_flags,
		     struct page **pages, struct vm_area_struct **vmas,
		     int *retry)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of a
 *   slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
	unsigned long start, int nr_pages, int write, int force,
	struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
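
/*
 * Usage sketch (illustrative, not from the original file): pin a single
 * page of a user buffer with this 3.x-era API and drop the reference.
 */
#if 0
static int gup_example(unsigned long uaddr)
{
	struct page *page;
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, 1,
			     0 /* write */, 0 /* force */, &page, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret == 1)
		page_cache_release(page);	/* drop the reference taken above */
	return ret;
}
#endif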

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
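
/*
 * Usage sketch (illustrative): translate a user address inside an IO or
 * raw PFN mapping; on NOMMU this is a simple shift of the address.
 */
#if 0
static int follow_pfn_example(struct vm_area_struct *vma, unsigned long uaddr)
{
	unsigned long pfn;
	int ret;

	ret = follow_pfn(vma, uaddr, &pfn);
	if (ret == 0)
		printk(KERN_DEBUG "addr %#lx -> pfn %#lx\n", uaddr, pfn);
	return ret;
}
#endif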

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return(count);
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
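
/*
 * Illustrative note (not from the original file): on NOMMU, vmalloc()
 * degenerates to kmalloc(), so the memory is physically contiguous and
 * vmalloc_to_page() is just virt_to_page().
 */
#if 0
static void vmalloc_example(void)
{
	void *buf = vmalloc(16 * 1024);

	if (buf) {
		struct page *page = vmalloc_to_page(buf);

		(void)page;
		vfree(buf);		/* ends up in kfree() */
	}
}
#endif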

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void  __attribute__((weak)) vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
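
/*
 * Illustrative sketch (not from the original file): under NOMMU, brk can
 * only move within the window preallocated at exec time
 * (mm->start_brk .. mm->context.end_brk); no new memory is ever mapped,
 * so growing by a page is pure bookkeeping.
 */
#if 0
static unsigned long brk_example(void)
{
	return sys_brk(current->mm->brk + PAGE_SIZE);
}
#endif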

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	if (mm->mmap_cache == vma)
		mm->mmap_cache = NULL;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file) {
		fput(vma->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(mm);
	}
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start <= addr && vma->vm_end > addr)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
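
/*
 * Usage sketch (illustrative): check whether an address is mapped in a
 * process; the caller should hold mm->mmap_sem at least for reading.
 */
#if 0
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr) != NULL;
}
#endif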

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = mm->mmap_cache;
	if (vma && vma->vm_start == addr && vma->vm_end == end)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			mm->mmap_cache = vma;
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		}
		else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		}
		else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		}
		else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & BDI_CAP_EXEC_MAP)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	}
	else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}
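	/*
	 * Worked example (illustrative): len = 5 pages gives order = 3 and
	 * total = 8; the loop above then shaves the 3 excess pages off the
	 * tail as an order-1 pair followed by an order-0 page.
	 */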

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
static unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

1248 1249
	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

L
Linus Torvalds 已提交
1250 1251 1252 1253
	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = file;
		get_file(file);
		vma->vm_file = file;
		get_file(file);
		if (vm_flags & VM_EXECUTABLE) {
			added_exe_file_vma(current->mm);
			vma->vm_mm = current->mm;
		}
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (pregion->vm_file->f_path.dentry->d_inode !=
			    file->f_path.dentry->d_inode)
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made  */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	if (vma->vm_flags & VM_EXECUTABLE)
		removed_exe_file_vma(vma->vm_mm);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset & ~PAGE_MASK))
		return -EINVAL;
	return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap(file, addr, len, prot, flag, offset);
		up_write(&mm->mmap_sem);
	}
	return ret;
}
EXPORT_SYMBOL(vm_mmap);

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = security_mmap_file(file, prot, flags);
	if (!retval) {
		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);
	}

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr'; a new vma is allocated
 * for either the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit = 0;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
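
/*
 * Illustrative, untested user-space sketch of the semantics above: an
 * in-place resize works only while the new size still fits the region
 * originally allocated by do_mmap_private(), and any attempt to move
 * the mapping is refused.  Assumes <sys/mman.h> and <errno.h>;
 * "example_resize" is a hypothetical name.
 */
#if 0
static void *example_resize(void *p, size_t old_len, size_t new_len)
{
	/* no MREMAP_MAYMOVE: under NOMMU the mapping cannot move anyway */
	void *q = mremap(p, old_len, new_len, 0);

	if (q == MAP_FAILED && errno == ENOMEM)
		return NULL;	/* new_len exceeded the backing region */
	return q;
}
#endif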

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);
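
/*
 * Illustrative sketch: a character driver's ->mmap() handler exposing
 * a physically contiguous buffer ("example_buf_phys" and "example_mmap"
 * are hypothetical).  Note the NOMMU constraint checked above: vm_start
 * must equal pfn << PAGE_SHIFT, i.e. the mapping sits directly over the
 * physical address.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = example_buf_phys >> PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif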

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
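
/*
 * Illustrative sketch of a caller ("example_vbuf" and "example_mmap"
 * are hypothetical): as the check above shows, the target VMA must
 * already carry VM_USERMAP or the call fails with -EINVAL; buffers
 * obtained with vmalloc_user() are the intended source.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_vbuf, vma->vm_pgoff);
}
#endif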

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
	 * cache and most inode caches should fall into this category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave out reserved pages; they cannot back anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
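
/*
 * Worked example of the strict (OVERCOMMIT_NEVER) path above, with
 * made-up numbers: totalram_pages = 16384 (64MiB of 4KiB pages),
 * sysctl_overcommit_ratio = 50, no swap, a non-root caller whose
 * mm->total_vm = 1024.  Then allowed = 16384 * 50 / 100 = 8192,
 * minus 8192 / 32 = 256 reserved for root, minus 1024 / 32 = 32 for
 * the caller's own headroom, leaving 7904 pages of permitted commit.
 */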

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}
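
/*
 * Illustrative sketch ("example_peek" is hypothetical): a ptrace-style
 * reader pulling bytes out of another task; the real PEEKDATA path in
 * kernel/ptrace.c uses access_process_vm() in much the same way.
 */
#if 0
static int example_peek(struct task_struct *child, unsigned long addr,
			void *dst, int len)
{
	int copied = access_process_vm(child, addr, dst, len, 0);

	return copied == len ? 0 : -EIO;
}
#endif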

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
			      0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}
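
/*
 * Illustrative sketch of the expected caller ("example_setsize" is
 * hypothetical): a filesystem shrinking an inode on truncate checks
 * the shared mappings first; ramfs's NOMMU support does much the same.
 */
#if 0
static int example_setsize(struct inode *inode, loff_t newsize)
{
	int ret = 0;

	if (newsize < inode->i_size)
		ret = nommu_shrink_inode_mappings(inode, inode->i_size,
						  newsize);
	if (ret == 0)
		truncate_setsize(inode, newsize);
	return ret;
}
#endif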