/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

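/*
 * Walk the ptes mapping [addr, end) under one pmd: rewrite each
 * present entry with the new protection and downgrade writable
 * migration entries to read-only. Returns the number of entries
 * updated.
 */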
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

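/*
 * Walk the pmds mapping [addr, end) under one pud. A transparent huge
 * pmd is changed in one go when the range covers the whole of it,
 * otherwise it is split and handled pte by pte; mmu notifiers are
 * invoked once around the part of the range that is actually
 * populated.
 */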
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
				&& pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				split_huge_pmd(vma, pmd, addr);
				if (pmd_none(*pmd))
					continue;
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

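/*
 * Walk the puds mapping [addr, end) under one pgd, descending into
 * each populated entry.
 */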
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

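/*
 * Top-level walk over one vma: flush the cache range up front, mark a
 * TLB flush as pending, and flush the TLB at the end only if some
 * entries were actually modified.
 */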
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

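/*
 * Dispatch point: hugetlb vmas have their own protection-change
 * helper, everything else goes through the generic page-table walk.
 */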
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

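/*
 * Apply @newflags to [start, end) within @vma: charge any additional
 * commit for a newly writable private mapping, merge with or split
 * from neighbouring vmas as needed, then rewrite the page
 * protections.
 */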
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		/* Check space limits when area turns into data. */
		if (!may_expand_vm(mm, newflags, nrpages) &&
				may_expand_vm(mm, oldflags, nrpages))
			return -ENOMEM;
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma->vm_userfaultfd_ctx);
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	/*
	 * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major
	 * fault on access.
	 */
	if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED &&
			(newflags & VM_WRITE)) {
		populate_vma_page_range(vma, start, end, NULL);
	}

	vm_stat_account(mm, oldflags, -nrpages);
	vm_stat_account(mm, newflags, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

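/*
 * mprotect(2): validate the arguments, then walk every vma covering
 * [start, start+len) and apply the new protection to each piece in
 * turn.
 */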
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot, 0);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

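	/*
	 * Walk the vmas covering [start, end); mprotect_fixup() may
	 * split or merge vmas and advances prev as it goes.
	 */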
	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}