/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

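/*
 * Walk the ptes mapped under one pmd and rewrite their protection to
 * @newprot. Returns the number of ptes actually updated, so callers can
 * skip TLB flushing when nothing changed.
 */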
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

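/*
 * Walk the pmds under one pud. A transparent huge pmd covering the whole
 * range is updated in one go via change_huge_pmd(); otherwise it is split
 * and handled pte by pte. The mmu notifier range is only opened once the
 * first populated pmd is seen.
 */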
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_pmd(vma, pmd, addr);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

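/*
 * Walk the puds under one pgd, skipping empty entries and delegating the
 * rest to change_pmd_range().
 */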
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

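/*
 * Top-level walk over [addr, end): flush the cache range up front, mark a
 * TLB flush as pending while entries are being rewritten, and only flush
 * the TLB if some entry was actually modified.
 */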
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

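/*
 * Common entry point for mprotect() and the NUMA hinting fault machinery:
 * hugetlb VMAs take their own path, everything else goes through the
 * generic page table walk above.
 */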
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

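/*
 * Apply @newflags to [start, end) within @vma: charge the commit for a
 * private mapping that becomes writable, merge or split VMAs so the range
 * gets a VMA of its own, then rewrite the page tables.
 */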
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings are accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

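/*
 * The mprotect(2) entry point. A minimal (hypothetical) userspace use,
 * making one page of an anonymous mapping read-only:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mprotect(p, 4096, PROT_READ))
 *		perror("mprotect");
 *
 * The loop below walks every VMA overlapping [start, start+len) and
 * calls mprotect_fixup() on each piece.
 */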
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}