/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

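/*
 * Walk the ptes under one pmd and apply @newprot to each present
 * entry in [addr, end); writable migration entries are downgraded to
 * read as well. Returns the number of entries updated so callers can
 * skip the TLB flush when nothing changed.
 */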
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
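	/* the do-while left pte one past the last entry, hence pte - 1 */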
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

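/*
 * Walk the pmds under one pud: transparent huge pmds are either
 * changed in place or split before falling back to the pte-level
 * walk. The mmu notifier start call is deferred until the first
 * populated pmd is seen, so empty ranges pay no notifier cost.
 */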
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				split_huge_pmd(vma, pmd, addr);
				if (pmd_trans_unstable(pmd))
					continue;
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

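/*
 * Walk the puds under one pgd, accumulating the count of updated
 * entries from the pmd level.
 */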
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

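/*
 * Top-level walk for change_protection(): flush the cache range up
 * front, mark a TLB flush as pending for the duration of the walk,
 * and flush the TLB only if some entry was actually modified.
 */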
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

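/*
 * Apply @newprot to [start, end) of @vma, using the hugetlb helper
 * for hugetlb mappings. Returns the number of pages updated.
 */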
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);

	return pages;
}

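/*
 * Update one VMA (or the [start, end) slice of it) to @newflags:
 * account newly writable private pages, merge or split VMAs as
 * needed, then rewrite the page-table protections. The caller must
 * hold mmap_sem for writing.
 */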
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);
	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);
	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

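	/*
	 * Fix up each VMA covering [start, end) in turn. Note that on
	 * failure, VMAs already processed keep their new protections:
	 * mprotect() may change mappings only partially.
	 */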
	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC? */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					ARCH_VM_PKEY_FLAGS;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

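/*
 * Legacy mprotect(): equivalent to pkey_mprotect() called with
 * pkey == -1 (see the comment above do_mprotect_pkey()).
 */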
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

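/*
 * A minimal sketch of the intended userspace flow (illustrative only:
 * raw syscall wrappers assumed, error handling omitted):
 *
 *	pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *	pkey_mprotect(addr, len, PROT_READ | PROT_WRITE, pkey);
 *	...
 *	pkey_free(pkey);
 */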
SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}