/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

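/*
 * Walk the ptes mapped by @pmd in [addr, end), rewriting the protection
 * bits of each present entry to @newprot. Returns the number of entries
 * updated, including write migration entries downgraded to read when
 * CONFIG_MIGRATION is enabled.
 */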
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult, so
				 * just be safe and disable write.
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

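/*
 * Walk the pmds under @pud in [addr, end). Transparent huge pmds are
 * either updated in place via change_huge_pmd() or split before falling
 * back to change_pte_range(). Returns the number of pages updated.
 */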
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				split_huge_pmd(vma, pmd, addr);
				if (pmd_trans_unstable(pmd))
					continue;
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

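/*
 * Walk the puds under @pgd in [addr, end), delegating populated entries
 * to change_pmd_range(). Returns the number of pages updated.
 */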
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

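/*
 * Walk the whole page-table range [addr, end) of @vma and apply @newprot,
 * flushing caches beforehand and the TLB afterwards (only if any entry
 * was actually changed). Returns the number of pages updated.
 */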
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
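	/* Let racing fault handlers know that a TLB flush may be pending */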
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

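/*
 * Entry point shared by mprotect() and the NUMA-balancing code: hugetlb
 * VMAs take their own path, everything else goes through the generic
 * page-table walk above.
 */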
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						dirty_accountable, prot_numa);

	return pages;
}

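/*
 * Apply @newflags to [start, end) of @vma: account the commit change,
 * merge or split VMAs as needed, then rewrite the page-table protections.
 */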
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when the area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

/*
 * pkey==-1 when doing a legacy mprotect()
 */
static int do_mprotect_pkey(unsigned long start, size_t len,
		unsigned long prot, int pkey)
{
	unsigned long nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	const bool rier = (current->personality & READ_IMPLIES_EXEC) &&
				(prot & PROT_READ);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	/*
	 * If userspace did not allocate the pkey, do not let
	 * them use it here.
	 */
	error = -EINVAL;
	if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey))
		goto out;

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long mask_off_old_flags;
		unsigned long newflags;
		int new_vma_pkey;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		/* Does the application expect PROT_READ to imply PROT_EXEC? */
		if (rier && (vma->vm_flags & VM_MAYEXEC))
			prot |= PROT_EXEC;

		/*
		 * Each mprotect() call explicitly passes r/w/x permissions.
		 * If a permission is not passed to mprotect(), it must be
		 * cleared from the VMA.
		 */
		mask_off_old_flags = VM_READ | VM_WRITE | VM_EXEC |
					ARCH_VM_PKEY_FLAGS;

		new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
		newflags = calc_vm_prot_bits(prot, new_vma_pkey);
		newflags |= (vma->vm_flags & ~mask_off_old_flags);

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
		prot = reqprot;
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	return do_mprotect_pkey(start, len, prot, -1);
}

SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len,
		unsigned long, prot, int, pkey)
{
	return do_mprotect_pkey(start, len, prot, pkey);
}

SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val)
{
	int pkey;
	int ret;

	/* No flags supported yet. */
	if (flags)
		return -EINVAL;
	/* check for unsupported init values */
	if (init_val & ~PKEY_ACCESS_MASK)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	pkey = mm_pkey_alloc(current->mm);

	ret = -ENOSPC;
	if (pkey == -1)
		goto out;

	ret = arch_set_user_pkey_access(current, pkey, init_val);
	if (ret) {
		mm_pkey_free(current->mm, pkey);
		goto out;
	}
	ret = pkey;
out:
	up_write(&current->mm->mmap_sem);
	return ret;
}

SYSCALL_DEFINE1(pkey_free, int, pkey)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = mm_pkey_free(current->mm, pkey);
	up_write(&current->mm->mmap_sem);

	/*
	 * We could provide warnings or errors if any VMA still
	 * has the pkey set here.
	 */
	return ret;
}
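
/*
 * Illustrative userspace sketch (not part of this file): the syscalls
 * above are normally reached through the glibc wrappers pkey_alloc(),
 * pkey_mprotect() and pkey_free(). Wrapper availability (glibc 2.27+,
 * _GNU_SOURCE with <sys/mman.h>) is an assumption, not something this
 * file guarantees. Once a key is bound to a mapping, access rights are
 * toggled through PKRU without further mprotect() calls:
 *
 *	#define _GNU_SOURCE
 *	#include <err.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *		int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
 *
 *		if (p == MAP_FAILED || pkey < 0)
 *			err(1, "setup");
 *		if (pkey_mprotect(p, 4096, PROT_READ | PROT_WRITE, pkey))
 *			err(1, "pkey_mprotect");
 *		pkey_free(pkey);
 *		return 0;
 *	}
 */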