/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

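/*
 * Walk the existing page tables and return the pmd entry mapping @addr,
 * or NULL if any level of the walk is absent or bad.
 */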
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

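/*
 * Allocate any missing page table levels for @addr in the destination
 * area, returning the pmd to fill, or NULL if allocation failed.
 */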
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

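/*
 * Move the ptes covering [old_addr, old_end) in @vma over to @new_addr
 * under @new_pmd in @new_vma, taking the rmap locks when the caller asks
 * for them (see the comment below on when they are needed).
 */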
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			i_mmap_lock_write(mapping);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		i_mmap_unlock_write(mapping);
}

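/*
 * move_ptes() holds the page table locks for the whole extent it is
 * given, so cap the extent handled per call to keep lock hold times
 * (and thus scheduling latency) bounded.
 */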
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

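/*
 * Move @len bytes of page table entries from @old_addr in @vma to
 * @new_addr in @new_vma, one pmd extent at a time.  Returns the number
 * of bytes actually moved, which is less than @len only if a page table
 * allocation for the destination failed.
 */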
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
					      vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

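/*
 * Move the mapping [old_addr, old_addr + old_len) to new_addr: copy the
 * vma, move its page tables across, then unmap the old range.  On
 * failure the entries are moved back and an error code is returned.
 */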
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

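/*
 * Look up the vma for a resize request and validate it: the range must
 * lie within a single, non-hugetlb, expandable vma, and the growth must
 * fit within the locked-memory and commit limits.  On success any newly
 * charged pages are returned in *p.
 */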
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

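/*
 * Handle mremap() with MREMAP_FIXED: validate the destination range,
 * unmap whatever currently occupies it (and any tail being shrunk off
 * the source), then move the vma to the requested address.
 */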
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

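/*
 * Return 1 if @vma can be grown in place by @delta bytes: the new end
 * must not overflow, collide with the next vma, or be rejected by
 * get_unmapped_area().
 */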
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (addr & ~PAGE_MASK)
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK) {
		vm_unacct_memory(charged);
		locked = false;
	}
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}