/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

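/*
 * Walk the existing page tables down to the pmd entry covering @addr;
 * returns NULL if any level is absent.  Nothing is allocated on the
 * source side of a move.
 */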
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

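/*
 * Allocate any missing pud/pmd levels at the destination address, in
 * contrast to get_old_pmd() which only walks what already exists.
 * Returns NULL on allocation failure.
 */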
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

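/*
 * Move the ptes in [old_addr, old_end) under old_pmd over to the
 * matching slots under new_pmd.  Both pte locks are held (nested)
 * while each entry is cleared from the old page table and written
 * into the new one.
 */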
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			i_mmap_lock_write(mapping);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		i_mmap_unlock_write(mapping);
}

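/*
 * Bound how much one move_ptes() call may copy, so the pte locks are
 * dropped and cond_resched() runs at a reasonable interval.
 */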
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

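/*
 * Move the page tables for [old_addr, old_addr + len) over to new_addr,
 * one pmd extent at a time: a whole huge pmd is moved (or split) when
 * possible, otherwise move_ptes() copies the extent pte by pte.
 * Returns the number of bytes actually moved.
 */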
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
					      vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

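/*
 * Move vma's contents from old_addr to a fresh vma at new_addr: copy the
 * vma, move the page tables across, then unmap the old range, keeping
 * the VM_ACCOUNT charge and locked_vm statistics straight on the way.
 */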
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
		if (err < 0) {
			move_page_tables(new_vma, new_addr, vma, old_addr,
					 moved_len, true);
			return err;
		}
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

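/*
 * Check that [addr, addr + old_len) sits inside a single vma which may
 * legitimately be resized to new_len, enforcing the mlock and commit
 * limits.  On success *p returns the pages charged for VM_ACCOUNT vmas.
 */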
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

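/*
 * MREMAP_FIXED path: validate new_addr, unmap whatever currently
 * occupies the destination, shrink the source if it is larger than
 * new_len, then move it into place with move_vma().
 */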
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

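/*
 * Can this vma grow in place by delta bytes?  True only if the gap to
 * the following vma is large enough and get_unmapped_area() agrees the
 * expanded range is usable.
 */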
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
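/*
 * Illustrative userspace usage (a sketch, not part of this file):
 *
 *	void *p = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
 *	if (p == MAP_FAILED)
 *		perror("mremap");
 *
 * With MREMAP_MAYMOVE the kernel may either grow the mapping in place
 * or return a new address; without it, the call fails with ENOMEM when
 * the mapping cannot be expanded where it is.
 */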
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (addr & ~PAGE_MASK)
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}