/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#include "internal.h"

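/*
 * Walk the existing page tables for @addr and return the pmd that maps
 * it, or NULL if any level of the walk is not present.  This is the
 * source side of the move, so nothing is allocated here.
 */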
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

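/*
 * Allocate whatever is needed to hold a pmd for @addr on the
 * destination side of the move.  Returns NULL if an allocation fails;
 * the pud allocated here is freed again if the subsequent pmd
 * allocation fails.
 */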
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd) {
		pud_free(mm, pud);
		return NULL;
	}

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
	else if (pte_file(pte))
		pte = pte_file_mksoft_dirty(pte);
#endif
	return pte;
}

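/*
 * Move the ptes covering [old_addr, old_end) in @vma from under
 * @old_pmd to @new_addr under @new_pmd, taking the rmap locks only
 * when the caller asks for them (see the comment below for when they
 * can be skipped).
 */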
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			mutex_lock(&mapping->i_mmap_mutex);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (anon_vma)
		anon_vma_unlock_write(anon_vma);
	if (mapping)
		mutex_unlock(&mapping->i_mmap_mutex);
}

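/*
 * Cap the stretch handed to a single move_ptes() call so that the
 * cond_resched() in move_page_tables() runs at least every 64 pages.
 */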
#define LATENCY_LIMIT	(64 * PAGE_SIZE)

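/*
 * Move the page tables covering @len bytes from @old_addr in @vma to
 * @new_addr in @new_vma, one pmd stretch at a time.  Returns the number
 * of bytes actually moved, which may be short of @len if a page-table
 * allocation fails; the caller handles the partial move (see
 * move_vma()).
 */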
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	pmd_t *old_pmd, *new_pmd;
	bool need_flush = false;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmun_start = old_addr;
	mmun_end   = old_end;
	mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (pmd_trans_huge(*old_pmd)) {
			int err = 0;
			if (extent == HPAGE_PMD_SIZE)
				err = move_huge_pmd(vma, new_vma, old_addr,
						    new_addr, old_end,
						    old_pmd, new_pmd);
			if (err > 0) {
				need_flush = true;
				continue;
			} else if (!err) {
				split_huge_page_pmd(vma, old_addr, old_pmd);
			}
			VM_BUG_ON(pmd_trans_huge(*old_pmd));
		}
		if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
						      new_pmd, new_addr))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		if (extent > LATENCY_LIMIT)
			extent = LATENCY_LIMIT;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end-len, old_addr);

	mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

	return len + old_addr - old_end;	/* how much done */
}

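/*
 * Do the actual relocation: set up new_vma with copy_vma(), move the
 * page tables across, and unmap the old range.  On a partial
 * move_page_tables() the entries are moved back and the new range is
 * unmapped instead, returning -ENOMEM.
 */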
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr, bool *locked)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

	if (do_munmap(mm, old_addr, old_len) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

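/*
 * Look up and validate the vma covering [addr, addr+old_len): reject
 * hugetlb and cross-vma ranges, and for a growing remap check pgoff
 * overflow, RLIMIT_MEMLOCK and commit accounting.  Returns the vma or
 * an ERR_PTR; for VM_ACCOUNT vmas, *p returns the pages charged.
 */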
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);

	if (!vma || vma->vm_start > addr)
		goto Efault;

	if (is_vm_hugetlb_page(vma))
		goto Einval;

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		goto Efault;

	/* Need to be careful about a growing mapping */
	if (new_len > old_len) {
		unsigned long pgoff;

		if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
			goto Efault;
		pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
		pgoff += vma->vm_pgoff;
		if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
			goto Einval;
	}

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			goto Eagain;
	}

	if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
		goto Enomem;

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			goto Efault;
		*p = charged;
	}

	return vma;

Efault:	/* very odd choice for most of the cases, but... */
	return ERR_PTR(-EFAULT);
Einval:
	return ERR_PTR(-EINVAL);
Enomem:
	return ERR_PTR(-ENOMEM);
Eagain:
	return ERR_PTR(-EAGAIN);
}

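/*
 * Handle MREMAP_FIXED: validate the new location, unmap anything that
 * currently lives there, then move the old mapping into it.
 */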
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (new_addr & ~PAGE_MASK)
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Check if the location we're moving into overlaps the
	 * old location at all, and fail if it does.
	 */
	if ((new_addr <= addr) && (new_addr+new_len) > addr)
		goto out;

	if ((addr <= new_addr) && (addr+old_len) > new_addr)
		goto out;

	ret = do_munmap(mm, new_addr, new_len);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (ret & ~PAGE_MASK)
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
	if (!(ret & ~PAGE_MASK))
		goto out;
out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

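/*
 * Can @vma grow in place by @delta bytes?  It cannot if the new end
 * overflows, runs into the next vma, or is rejected by
 * get_unmapped_area() with MAP_FIXED.
 */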
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
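/*
 * For reference, the typical userspace pattern this syscall serves (a
 * minimal sketch, not part of this file): grow an anonymous mapping,
 * letting the kernel relocate it when it cannot expand in place.
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");	- the old mapping stays valid
 */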
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (addr & ~PAGE_MASK)
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	down_write(&current->mm->mmap_sem);

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * do_munmap does all the needed commit accounting
	 */
	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len);
		if (ret && old_len != new_len)
			goto out;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area..
	 */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}