/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

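	/*
	 * prot_numa runs with mmap_sem held only for read, so take the pmd
	 * lock to get a stable view of the pmd: if it is (or just became)
	 * huge or none we raced with a fault and must back out.
	 */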
	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

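	/*
	 * Let the architecture batch the PTE updates below where it
	 * supports that; these hooks are no-ops on most architectures.
	 */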
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool preserve_write = prot_numa && pte_write(oldpte);

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);
			if (preserve_write)
				ptent = pte_mkwrite(ptent);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
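			/*
			 * Non-present PTE: it may be a migration entry,
			 * which remembers whether the page was mapped
			 * writable and so must be downgraded as well.
			 */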
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
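	/* the loop above left "pte" one past the last entry it visited */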
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/*
		 * Invoke the mmu notifier on the first populated pmd only,
		 * so an entirely unpopulated range sends no notification.
		 */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
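	/*
	 * Advertise the pending TLB flush so racing code (e.g. NUMA
	 * hinting faults) can tell these page tables are being changed.
	 */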
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

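	/* hugetlb VMAs use their own page-table layout and walker */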
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
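	/*
	 * vma_wants_writenotify() reports whether writes must still trap
	 * for dirty tracking; vma_set_page_prot() derives vm_page_prot
	 * accordingly. dirty_accountable lets change_protection() map
	 * already-dirty pages writable and skip the pointless fault.
	 */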
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

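/*
 * Illustrative userspace view (not part of this file): a caller typically
 * maps memory writable and later revokes write access:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;			// fine while writable
 *	mprotect(p, 4096, PROT_READ);	// downgrade to read-only
 *	// any later store through p now faults with SIGSEGV
 */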
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
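	/*
	 * PROT_GROWSDOWN extends the change down to the start of a stack
	 * (VM_GROWSDOWN) vma; PROT_GROWSUP does the same upward for
	 * VM_GROWSUP mappings.
	 */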
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

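	/*
	 * Walk every vma overlapping [start, end), applying the new flags
	 * to each; mprotect_fixup() splits vmas at the range boundaries.
	 */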
	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/*
		 * newflags >> 4 shifts the VM_MAY% bits in place of VM_%:
		 * each requested VM_READ/VM_WRITE/VM_EXEC bit is allowed
		 * only if its VM_MAY% counterpart is also set.
		 */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}