/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

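/*
 * Walk the source page tables and return the pmd mapping @addr, or
 * NULL if any level is missing.  Nothing is allocated here: an absent
 * source pmd just means there is nothing to move in that range.
 */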
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

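/*
 * Make sure the pud and pmd covering @addr exist on the destination
 * side, allocating them if need be.  The pte level is allocated later
 * by the caller, once it knows the pmd cannot be moved as a whole.
 */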
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

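/*
 * The pte may be present, a swap entry or a file entry, and each
 * encoding keeps its soft dirty bit in a different place.
 */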
static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
	else if (pte_file(pte))
		pte = pte_file_mksoft_dirty(pte);
#endif
	return pte;
}

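/*
 * Move the ptes covering [old_addr, old_end) from old_pmd to new_pmd.
 * The caller guarantees the range does not cross a pmd boundary on
 * either side and that both pmds are already allocated.
 */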
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			i_mmap_lock_write(mapping);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		i_mmap_unlock_write(mapping);
}

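/*
 * Largest chunk handed to move_ptes() in one go: bounds how long the
 * pte locks are held and lets the caller cond_resched() between chunks.
 */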
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

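/*
 * Move the page tables covering [old_addr, old_addr + len) over to
 * new_addr, pmd by pmd.  A transparent huge pmd is moved as a whole
 * when the entire thing is being relocated, otherwise it is split and
 * the ptes are moved individually by move_ptes().  Returns how many
 * bytes were actually moved, which can fall short of len if a
 * destination page table cannot be allocated.
 */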
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE) {
				VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
					      vma);
				/* See comment in move_ptes() */
				if (need_rmap_locks)
					anon_vma_lock_write(vma->anon_vma);
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
				if (need_rmap_locks)
					anon_vma_unlock_write(vma->anon_vma);
			}
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

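/*
 * Move the whole vma to the already-chosen new_addr: set up new_vma
 * with copy_vma(), move the page tables across, unmap the old range
 * and fix up the accounting.  If moving the page tables fails halfway,
 * the entries moved so far are moved back and -ENOMEM is returned in
 * place of the new address.
 */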
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

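/*
 * Look up and sanity-check the vma for a resize: it must exist at
 * @addr, cover old_len without crossing into another vma, not be
 * hugetlb, and any growth must pass the mlock, address space and
 * memory accounting limits.  On success *p holds the number of pages
 * charged against VM_ACCOUNT.
 */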
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

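/*
 * MREMAP_FIXED path: move the mapping to a caller-chosen new_addr,
 * which must be page aligned and must not overlap the old range.
 * Whatever is currently mapped at the destination gets unmapped first.
 */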
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

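/*
 * Can this vma be grown in place by delta bytes?  Only if the new end
 * does not wrap, does not run into the following vma, and the enlarged
 * range is acceptable to get_unmapped_area().
 */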
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (addr & ~PAGE_MASK)
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}