/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/sched/sysctl.h>
#include <linux/printk.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}

EXPORT_SYMBOL_GPL(vm_memory_committed);

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
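
/*
 * Example (illustrative sketch): unlike ksize(), which is only defined for
 * kmalloc() results, kobjsize() answers for any valid pointer, falling back
 * to the covering VMA or the compound page size:
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	unsigned int backing = kobjsize(p);
 *	kfree(p);
 *
 * where backing reports the whole slab object, so at least 100 bytes.
 */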

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of a
 *   slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
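
/*
 * Usage sketch (illustrative; tsk, mm and uaddr are assumed to come from the
 * caller's context): lookups are expected to run under mmap_sem, and on
 * NOMMU no fault is ever taken - the page is simply pinned via
 * virt_to_page():
 *
 *	struct page *page;
 *	long ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = get_user_pages(tsk, mm, uaddr, 1, 0, 0, &page, NULL);
 *	up_read(&mm->mmap_sem);
 */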

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
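
/*
 * Example (sketch; vma and address are assumed to be supplied by the
 * caller): with no MMU, the PFN of an address in a VM_IO/VM_PFNMAP mapping
 * is just the address shifted down:
 *
 *	unsigned long pfn;
 *
 *	if (!follow_pfn(vma, address, &pfn))
 *		pr_debug("addr %lx -> pfn %lx\n", address, pfn);
 */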

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);
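
/*
 * Note with a small sketch: because __vmalloc() above is really kmalloc(),
 * every vmalloc() variant in this file hands back physically contiguous
 * memory, which is why vmalloc_to_page()/vmalloc_to_pfn() below can be
 * plain virt_to_page()/page_to_pfn() wrappers:
 *
 *	void *p = vmalloc(4 * PAGE_SIZE);
 *	struct page *pg = vmalloc_to_page(p);
 *
 * pg here is simply the first page of one contiguous allocation.
 */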

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
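
/*
 * Sketch of the intended pairing: vmalloc_user() tags the backing VMA with
 * VM_USERMAP, which is exactly what remap_vmalloc_range() further down
 * checks before letting a driver expose such a buffer through mmap:
 *
 *	void *buf = vmalloc_user(len);
 *
 * buf is zeroed and, unlike plain vmalloc() memory, user-mappable.
 */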

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
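
/*
 * Example (sketch; kaddr is assumed to be a valid kernel virtual address,
 * since no checking is done here): with a single flat address space,
 * vread()/vwrite() degenerate to bare memcpy()s:
 *
 *	char tmp[64];
 *	long n = vread(tmp, kaddr, sizeof(tmp));
 */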

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  In this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}
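
/*
 * Illustration (hypothetical userspace view): because no pages can be
 * remapped here, brk() only succeeds inside the window reserved at execve()
 * time, mm->start_brk .. mm->context.end_brk:
 *
 *	char *cur = sbrk(0);
 *	int ok = brk(cur + 4096);
 *
 * The grow succeeds only while it stays below context.end_brk; anything
 * beyond that simply returns the old break.
 */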

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
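
/*
 * Usage sketch (illustrative): as noted above, callers hold mmap_sem across
 * the lookup, and must still check vm_start themselves since the returned
 * VMA is only guaranteed to end above addr:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		addr_is_mapped = true;
 *	up_read(&mm->mmap_sem);
 */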

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file_inode(file)->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & BDI_CAP_EXEC_MAP)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff,
			    unsigned long *populate)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned, for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made  */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}

erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
	return 0;
}
EXPORT_SYMBOL(do_munmap);
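
/*
 * Example (sketch, userspace view): a partial unmap is only legal for
 * anonymous mappings and must stay inside a single VMA, so
 *
 *	p = mmap(NULL, 3 * page, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	munmap(p + page, page);
 *
 * splits and shrinks the anonymous VMA, while the same partial call on a
 * file-backed mapping fails with -EINVAL.
 */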

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (addr & ~PAGE_MASK)
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
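/*
 * Illustrative userspace sketch of what the NOMMU mremap() permits (an
 * assumption about typical use, not taken from this file):
 *
 *	void *p = mmap(NULL, 8 * page_size, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p = mremap(p, 8 * page_size, 4 * page_size, 0);
 *		// OK: shrink in place
 *	p = mremap(p, 4 * page_size, 64 * page_size, MREMAP_MAYMOVE);
 *		// -ENOMEM unless the region backing the mapping was
 *		// over-allocated by do_mmap_private() to at least 64 pages;
 *		// MREMAP_MAYMOVE cannot help, since moving is unsupported
 */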

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);
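/*
 * A minimal sketch of a driver ->mmap() handler built on the above;
 * "mydev_mmap" and "mydev_phys_base" are hypothetical names used only
 * for illustration:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       mydev_phys_base >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * On NOMMU this can only succeed when vma->vm_start already equals the
 * physical address, as the check above enforces; no translation is set up.
 */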

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);
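/*
 * Illustrative call from a PCI driver's ->mmap() (hypothetical usage, not
 * from this file):
 *
 *	return vm_iomap_memory(vma, pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *
 * vma->vm_pgoff selects the page offset within the BAR to start from.
 */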

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
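/*
 * Sketch of the usual pairing (assumed typical use, with hypothetical
 * names): the buffer must come from vmalloc_user(), which sets VM_USERMAP:
 *
 *	buf = vmalloc_user(len);		// at device setup time
 *	...
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, 0);
 *	}
 *
 * On NOMMU the VMA is simply redirected at the buffer itself.
 */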

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the reserved pages alone; they are not available
		 * for anonymous allocations.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min(mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
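/*
 * Worked example with illustrative numbers (4KB pages, non-root caller,
 * OVERCOMMIT_GUESS): with 100000 free + file pages, 5000 of them shmem,
 * no swap, 3000 reclaimable slab pages and totalreserve_pages = 2000:
 *
 *	free = 100000 - 5000 + 3000 - 2000 = 96000 pages
 *	free -= 8192kB >> (PAGE_SHIFT - 10) = 96000 - 2048 = 93952 pages
 *
 * so any request for fewer than 93952 pages is granted.
 */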

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}
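/*
 * Illustrative caller sketch (hypothetical, in the style of ptrace-like
 * users of this helper):
 *
 *	char buf[64];
 *	int n = access_process_vm(child, addr, buf, sizeof(buf), 0);
 *
 * On return, n bytes have been copied out of "child"'s mapping at "addr",
 * or n is 0 if the address was not mapped readably.
 */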

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken and then shrink the
 * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
				  0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}
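/*
 * Illustrative example (assumed numbers): truncating a five-page file to
 * two pages gives low = 2 and high = 5, so any shared mapping overlapping
 * pages beyond the new end makes the truncate fail with -ETXTBSY, and any
 * vm_region extending past the two-page mark is trimmed back to newsize.
 */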

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
module_init(init_user_reserve)
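/*
 * Example with illustrative numbers: with 1GiB free at boot,
 * free_kbytes = 1048576, so the reserve is min(1048576 / 32, 131072)
 * = 32768kB (32MB); with 8GiB free, the 128MB cap applies instead.
 */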

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
module_init(init_admin_reserve)
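/*
 * Example: with exactly 256MB free, free_kbytes = 262144 and
 * 262144 / 32 = 8192kB, so the 8MB cap is reached precisely at 256MB,
 * matching the description above.
 */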