/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

#if 0
#define kenter(FMT, ...) \
	printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
#else
#define kenter(FMT, ...) \
	no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) \
	no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) \
	no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}

EXPORT_SYMBOL_GPL(vm_memory_committed);

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
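
/*
 * Illustrative sketch (not part of the original file): comparing kobjsize()
 * with the size a caller actually requested. The function name is
 * hypothetical; kobjsize() reports the whole allocation, so the result can
 * exceed the 100 bytes asked for.
 */
#if 0
static void kobjsize_example(void)
{
	void *p = kmalloc(100, GFP_KERNEL);

	if (p) {
		/* prints the rounded-up object size, e.g. 128 */
		printk(KERN_DEBUG "kobjsize = %u\n", kobjsize(p));
		kfree(p);
	}
}
#endif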

long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		      unsigned long start, unsigned long nr_pages,
		      unsigned int foll_flags, struct page **pages,
		      struct vm_area_struct **vmas, int *nonblocking)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (foll_flags & FOLL_WRITE) ?
			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (foll_flags & FOLL_FORCE) ?
			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < nr_pages; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start = (start + PAGE_SIZE) & PAGE_MASK;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		    unsigned long start, unsigned long nr_pages,
		    int write, int force, struct page **pages,
		    struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
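
/*
 * Illustrative sketch (not part of the original file): pinning a single page
 * of a user buffer with the get_user_pages() call above. The function name
 * is hypothetical; the pinned page must later be released with
 * page_cache_release().
 */
#if 0
static struct page *pin_one_page(unsigned long uaddr, int write)
{
	struct page *page;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, 1,
			     write, 0, &page, NULL);
	up_read(&current->mm->mmap_sem);

	return ret == 1 ? page : NULL;
}
#endif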

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
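
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * use follow_pfn() to learn the frame behind an address in a VM_IO or
 * VM_PFNMAP mapping. The function name is hypothetical; the caller must
 * hold mmap_sem.
 */
#if 0
static int pfn_of_address(struct mm_struct *mm, unsigned long addr,
			  unsigned long *pfn)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (vma)
		ret = follow_pfn(vma, addr, pfn);
	up_read(&mm->mmap_sem);
	return ret;
}
#endif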

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
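
/*
 * Illustrative sketch (not part of the original file): a driver mmap()
 * handler exporting a vmalloc_user() buffer. Because vmalloc_user() set
 * VM_USERMAP on the owning VMA, the remap_vmalloc_range() call below is
 * permitted. "drv_buf" is a hypothetical name.
 */
#if 0
static void *drv_buf;	/* allocated elsewhere with vmalloc_user() */

static int drv_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, drv_buf, vma->vm_pgoff);
}
#endif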

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
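
/*
 * Illustrative sketch (not part of the original file): on nommu the
 * vmalloc() above degenerates to kmalloc(), so allocation and release look
 * exactly like the MMU case. The function name is hypothetical.
 */
#if 0
static int vmalloc_example(void)
{
	char *buf = vmalloc(4 * PAGE_SIZE);

	if (!buf)
		return -ENOMEM;
	memset(buf, 0, 4 * PAGE_SIZE);
	vfree(buf);
	return 0;
}
#endif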

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __weak vmalloc_sync_all(void)
{
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_range(mm->brk, brk);
	return mm->brk = brk;
}

/*
 * initialise the VMA and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(unlikely(last->vm_end <= last->vm_start));
	BUG_ON(unlikely(last->vm_top < last->vm_end));

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(unlikely(region->vm_end <= region->vm_start));
		BUG_ON(unlikely(region->vm_top < region->vm_end));
		BUG_ON(unlikely(region->vm_start < last->vm_top));

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page(from);

		kdebug("- free %lx", from);
		atomic_long_dec(&mmap_pages_allocated);
		if (page_count(page) != 1)
			kdebug("free page %p: refcount not one: %d",
			       page, page_count(page));
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	kenter("%p{%d}", region, region->vm_usage);

	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY) {
			kdebug("free series");
			free_page_series(region->vm_start, region->vm_top);
		}
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

/*
 * update protection on a vma
 */
static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
{
#ifdef CONFIG_MPU
	struct mm_struct *mm = vma->vm_mm;
	long start = vma->vm_start & PAGE_MASK;
	while (start < vma->vm_end) {
		protect_page(mm, start, flags);
		start += PAGE_SIZE;
	}
	update_protections(mm);
#endif
}

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * and tree and add to the address space's page tree also if not an anonymous
 * page
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma, *prev;
	struct address_space *mapping;
	struct rb_node **p, *parent, *rb_prev;

	kenter(",%p", vma);

	BUG_ON(!vma->vm_region);

	mm->map_count++;
	vma->vm_mm = mm;

	protect_vma(vma, vma->vm_flags);

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* add the VMA to the tree */
	parent = rb_prev = NULL;
	p = &mm->mm_rb.rb_node;
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		/* sort by: start addr, end addr, VMA struct addr in that order
		 * (the latter is necessary as we may get identical VMAs) */
		if (vma->vm_start < pvma->vm_start)
			p = &(*p)->rb_left;
		else if (vma->vm_start > pvma->vm_start) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma->vm_end < pvma->vm_end)
			p = &(*p)->rb_left;
		else if (vma->vm_end > pvma->vm_end) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else if (vma < pvma)
			p = &(*p)->rb_left;
		else if (vma > pvma) {
			rb_prev = parent;
			p = &(*p)->rb_right;
		} else
			BUG();
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);

	/* add VMA to the VMA list also */
	prev = NULL;
	if (rb_prev)
		prev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);

	__vma_link_list(mm, vma, prev, parent);
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static void delete_vma_from_mm(struct vm_area_struct *vma)
{
	int i;
	struct address_space *mapping;
	struct mm_struct *mm = vma->vm_mm;
	struct task_struct *curr = current;

	kenter("%p", vma);

	protect_vma(vma, 0);

	mm->map_count--;
	for (i = 0; i < VMACACHE_SIZE; i++) {
		/* if the vma is cached, invalidate the entire cache */
		if (curr->vmacache[i] == vma) {
			vmacache_invalidate(curr->mm);
			break;
		}
	}

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		mutex_lock(&mapping->i_mmap_mutex);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		mutex_unlock(&mapping->i_mmap_mutex);
	}

	/* remove from the MM's tree and list */
	rb_erase(&vma->vm_rb, &mm->mm_rb);

	if (vma->vm_prev)
		vma->vm_prev->vm_next = vma->vm_next;
	else
		mm->mmap = vma->vm_next;

	if (vma->vm_next)
		vma->vm_next->vm_prev = vma->vm_prev;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	kenter("%p", vma);
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	kmem_cache_free(vm_area_cachep, vma);
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	/* check the cache first */
	vma = vmacache_find(mm, addr);
	if (likely(vma))
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end > addr) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(find_vma);
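
/*
 * Illustrative sketch (not part of the original file): checking whether an
 * address is covered by any mapping. find_vma() wants mm->mmap_sem held at
 * least for reading; the function name is hypothetical.
 */
#if 0
static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	bool mapped;

	down_read(&mm->mmap_sem);
	mapped = find_vma(mm, addr) != NULL;
	up_read(&mm->mmap_sem);
	return mapped;
}
#endif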

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	/* check the cache first */
	vma = vmacache_find_exact(mm, addr, end);
	if (vma)
		return vma;

	/* trawl the list (there may be multiple mappings in which addr
	 * resides) */
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start < addr)
			continue;
		if (vma->vm_start > addr)
			return NULL;
		if (vma->vm_end == end) {
			vmacache_update(addr, vma);
			return vma;
		}
	}

	return NULL;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file_inode(file)->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		}
		else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		if (capabilities & BDI_CAP_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				capabilities &= ~BDI_CAP_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					printk(KERN_WARNING
					       "MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		}
		else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		}
		else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & BDI_CAP_EXEC_MAP)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	}
	else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
		if (flags & MAP_SHARED)
			vm_flags |= VM_SHARED;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && current->ptrace)
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	struct page *pages;
	unsigned long total, point, n;
	void *base;
	int ret, order;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (capabilities & BDI_CAP_MAP_DIRECT) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret == 0) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	kdebug("alloc order %d for %lx", order, len);

	pages = alloc_pages(GFP_KERNEL, order);
	if (!pages)
		goto enomem;

	total = 1 << order;
	atomic_long_add(total, &mmap_pages_allocated);

	point = len >> PAGE_SHIFT;

	/* we allocated a power-of-2 sized page set, so we may want to trim off
	 * the excess */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
		while (total > point) {
			order = ilog2(total - point);
			n = 1 << order;
			kdebug("shave %lu/%lu @%lu", n, total - point, total);
			atomic_long_sub(n, &mmap_pages_allocated);
			total -= n;
			set_page_refcounted(pages + total);
			__free_pages(pages + total, order);
		}
	}

	for (point = 1; point < total; point++)
		set_page_refcounted(&pages[point]);

	base = page_address(pages);
	region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0);
	return -ENOMEM;
}

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff,
			    unsigned long *populate)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags, result;
	int ret;

	kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0) {
		kleave(" = %d [val]", ret);
		return ret;
	}

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	INIT_LIST_HEAD(&vma->anon_vma_chain);
	vma->vm_flags = vm_flags;
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!(pregion->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY) {
				kdebug("share copy");
				vma->vm_flags |= VM_MAPPED_COPY;
			} else {
				kdebug("share mmap");
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & BDI_CAP_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error_just_free;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	add_vma_to_mm(current->mm, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made  */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	kleave(" = %lx", result);
	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	kmem_cache_free(vm_area_cachep, vma);
	kleave(" = %d", ret);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	printk(KERN_WARNING "Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;

error_getting_region:
	printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
	       " from process %d failed\n",
	       len, current->pid);
	show_free_areas(0);
	return -ENOMEM;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			      a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;

	kenter("");

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new) {
		kmem_cache_free(vm_region_jar, region);
		return -ENOMEM;
	}

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	delete_vma_from_mm(vma);
	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);
	add_vma_to_mm(mm, vma);
	add_vma_to_mm(mm, new);
	return 0;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int shrink_vma(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	kenter("");

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	delete_vma_from_mm(vma);
	if (from > vma->vm_start)
		vma->vm_end = from;
	else
		vma->vm_start = to;
	add_vma_to_mm(mm, vma);

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int ret;

	kenter(",%lx,%zx", start, len);

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = find_vma(mm, start);
	if (!vma) {
		static int limit = 0;
		if (limit < 5) {
			printk(KERN_WARNING
			       "munmap of memory not mmapped by process %d"
			       " (%s): 0x%lx-0x%lx\n",
			       current->pid, current->comm,
			       start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start) {
				kleave(" = -EINVAL [miss]");
				return -EINVAL;
			}
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma->vm_next;
		} while (vma);
		kleave(" = -EINVAL [split file]");
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end) {
			kleave(" = -EINVAL [superset]");
			return -EINVAL;
		}
		if (start & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned start]");
			return -EINVAL;
		}
		if (end != vma->vm_end && end & ~PAGE_MASK) {
			kleave(" = -EINVAL [unaligned split]");
			return -EINVAL;
		}
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(mm, vma, start, 1);
			if (ret < 0) {
				kleave(" = %d [split]", ret);
				return ret;
			}
		}
		return shrink_vma(mm, vma, start, end);
	}
erase_whole_vma:
	delete_vma_from_mm(vma);
	delete_vma(mm, vma);
	kleave(" = 0");
L
Linus Torvalds 已提交
1726 1727
	return 0;
}
1728
EXPORT_SYMBOL(do_munmap);

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	if (!mm)
		return;

	kenter("");

	mm->total_vm = 0;

	while ((vma = mm->mmap)) {
		mm->mmap = vma->vm_next;
		delete_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}

	kleave("");
}

unsigned long vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
1795
	if (old_len == 0 || new_len == 0)
L
Linus Torvalds 已提交
1796 1797
		return (unsigned long) -EINVAL;

1798 1799 1800
	if (addr & ~PAGE_MASK)
		return -EINVAL;

L
Linus Torvalds 已提交
1801 1802 1803
	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

1804
	vma = find_vma_exact(current->mm, addr, old_len);
1805 1806
	if (!vma)
		return (unsigned long) -EINVAL;
L
Linus Torvalds 已提交
1807

1808
	if (vma->vm_end != vma->vm_start + old_len)
L
Linus Torvalds 已提交
1809 1810
		return (unsigned long) -EFAULT;

1811
	if (vma->vm_flags & VM_MAYSHARE)
L
Linus Torvalds 已提交
1812 1813
		return (unsigned long) -EPERM;

1814
	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
L
Linus Torvalds 已提交
1815 1816 1817
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
1818 1819 1820 1821
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}

SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}
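
/*
 * Illustrative userspace sketch (not kernel code; assumes a private
 * anonymous mapping whose backing region, as carved out by
 * do_mmap_private(), is big enough for the new size).  The mapping
 * never moves: MREMAP_MAYMOVE buys nothing here and MREMAP_FIXED to
 * any other address fails with -EINVAL.
 */
#if 0
#define _GNU_SOURCE
#include <sys/mman.h>

static void *resize_in_place(void *p, size_t old_len, size_t new_len)
{
	/* returns p on success, MAP_FAILED on error */
	return mremap(p, old_len, new_len, 0);
}
#endif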

struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	*page_mask = 0;
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		unsigned long pfn, unsigned long size, pgprot_t prot)
{
	if (addr != (pfn << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);
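
/*
 * Illustrative driver-side sketch ("foo_mmap" is made up): on NOMMU
 * the call above only checks that the VMA already sits on top of the
 * physical range and tags its flags, but a driver's ->mmap() handler
 * keeps the same calling convention as on an MMU kernel.
 */
#if 0
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
#endif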

int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
{
	unsigned long pfn = start >> PAGE_SHIFT;
	unsigned long vm_len = vma->vm_end - vma->vm_start;

	pfn += vma->vm_pgoff;
	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_iomap_memory);
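
/*
 * Illustrative driver-side sketch (FOO_PHYS_BASE and FOO_WIN_SIZE are
 * made up): vm_iomap_memory() takes the physical base and size of the
 * device window and derives the pfn offset from vma->vm_pgoff itself.
 */
#if 0
static int foo_iomem_mmap(struct file *file, struct vm_area_struct *vma)
{
	return vm_iomap_memory(vma, FOO_PHYS_BASE, FOO_WIN_SIZE);
}
#endif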

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
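
/*
 * Illustrative driver-side sketch ("foo_buf"/"foo_vmalloc_mmap" are
 * made up): the buffer must come from vmalloc_user() or
 * vmalloc_32_user() so that VM_USERMAP is set on it; plain vmalloc()
 * memory is rejected with -EINVAL above.
 */
#if 0
static void *foo_buf;	/* assumed: foo_buf = vmalloc_user(FOO_SIZE); */

static int foo_vmalloc_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
}
#endif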

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed, reserve;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		free = global_page_state(NR_FREE_PAGES);
		free += global_page_state(NR_FILE_PAGES);

		/*
		 * shmem pages shouldn't be counted as free in this
		 * case, they can't be purged, only swapped out, and
		 * that won't affect the overall amount of available
		 * memory in the system.
		 */
		free -= global_page_state(NR_SHMEM);

		free += get_nr_swap_pages();

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave reserved pages alone; they are not available
		 * to back anonymous pages.
		 */
		if (free <= totalreserve_pages)
			goto error;
		else
			free -= totalreserve_pages;

		/*
		 * Reserve some for root
		 */
		if (!cap_sys_admin)
			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
		allowed -= min(mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
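
/*
 * Illustrative sketch of the intended LSM usage (compare
 * cap_vm_enough_memory() in security/commoncap.c; this copy is made
 * up and kept out of the build): the security module decides the
 * cap_sys_admin bit, then defers the accounting decision to
 * __vm_enough_memory().
 */
#if 0
static int example_vm_enough_memory(struct mm_struct *mm, long pages)
{
	int cap_sys_admin = 0;

	if (cap_capable(current_cred(), &init_user_ns,
			CAP_SYS_ADMIN, SECURITY_CAP_NOAUDIT) == 0)
		cap_sys_admin = 1;
	return __vm_enough_memory(mm, pages, cap_sys_admin);
}
#endif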

int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
}
EXPORT_SYMBOL(filemap_map_pages);

int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
			     unsigned long size, pgoff_t pgoff)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(generic_file_remap_pages);

static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			copy_to_user_page(vma, NULL, addr,
					 (void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			copy_from_user_page(vma, NULL, addr,
					    buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);

	return len;
}

/**
 * access_remote_vm - access another process' address space
 * @mm:		the mm_struct of the target address space
 * @addr:	start address to access
 * @buf:	source or destination buffer
 * @len:	number of bytes to transfer
 * @write:	whether the access is a write
 *
 * The caller must hold a reference on @mm.
 */
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
		void *buf, int len, int write)
{
	return __access_remote_vm(NULL, mm, addr, buf, len, write);
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	len = __access_remote_vm(tsk, mm, addr, buf, len, write);

	mmput(mm);
	return len;
}
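
/*
 * Illustrative sketch ("example_peek" is made up): a ptrace-style
 * read of one word from another task.  On NOMMU the copy only
 * happens while [addr, addr + len) lies inside one of the target's
 * VMAs, so a short return value means the window was clamped.
 */
#if 0
static int example_peek(struct task_struct *tsk, unsigned long addr,
			unsigned long *val)
{
	int copied = access_process_vm(tsk, addr, val, sizeof(*val), 0);

	return copied == sizeof(*val) ? 0 : -EIO;
}
#endif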

/**
 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
 * @inode: The inode to check
 * @size: The current filesize of the inode
 * @newsize: The proposed filesize of the inode
 *
 * Check the shared mappings on an inode on behalf of a shrinking truncate to
 * make sure that any outstanding VMAs aren't broken, and then shrink any
 * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
 * automatically grant mappings that are too large.
 */
int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
				size_t newsize)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	pgoff_t low, high;
	size_t r_size, r_top;

	low = newsize >> PAGE_SHIFT;
	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	down_write(&nommu_region_sem);
	mutex_lock(&inode->i_mapping->i_mmap_mutex);

	/* search for VMAs that fall within the dead zone */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
		/* found one - only interested if it's shared out of the page
		 * cache */
		if (vma->vm_flags & VM_SHARED) {
			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
			up_write(&nommu_region_sem);
			return -ETXTBSY; /* not quite true, but near enough */
		}
	}

	/* reduce any regions that overlap the dead zone - if in existence,
	 * these will be pointed to by VMAs that don't overlap the dead zone
	 *
	 * we don't check for any regions that start beyond the EOF as there
	 * shouldn't be any
	 */
	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
				  0, ULONG_MAX) {
		if (!(vma->vm_flags & VM_SHARED))
			continue;

		region = vma->vm_region;
		r_size = region->vm_top - region->vm_start;
		r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

		if (r_top > newsize) {
			region->vm_top -= r_top - newsize;
			if (region->vm_end > region->vm_top)
				region->vm_end = region->vm_top;
		}
	}

	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
	up_write(&nommu_region_sem);
	return 0;
}
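
/*
 * Illustrative filesystem-side sketch ("example_shrink" is made up):
 * a NOMMU-aware filesystem calls the helper above from its shrinking
 * truncate path before changing i_size, so that -ETXTBSY from a
 * still-shared mapping aborts the truncate instead of breaking the
 * mapping.
 */
#if 0
static int example_shrink(struct inode *inode, loff_t newsize)
{
	int ret = nommu_shrink_inode_mappings(inode, inode->i_size,
					      newsize);

	if (ret == 0)
		truncate_setsize(inode, newsize);
	return ret;
}
#endif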

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB)
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int __meminit init_user_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}
module_init(init_user_reserve)
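
/*
 * Worked example of the formula above: with 2GB of free pages,
 * free_kbytes / 32 comes to 64MB, so 64MB is reserved; with 8GB free
 * the 1UL << 17 (128MB) cap wins instead.
 */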

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int __meminit init_admin_reserve(void)
{
	unsigned long free_kbytes;

	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
	return 0;
}
module_init(init_admin_reserve)