/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

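/*
 * Note (added commentary): architectures may override pgprot_modify() to
 * carry bits of the old protection (e.g. cache attribute bits) over into
 * the new one; this generic fallback simply uses the new protection
 * unchanged.
 */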
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

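/*
 * Walk the ptes mapped by one pmd entry and apply @newprot. Returns the
 * number of ptes actually updated so that callers can skip the TLB flush
 * when nothing changed.
 */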
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

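			/*
			 * Plain protection change: clear any stale NUMA
			 * hinting state, then install the new protection
			 * bits via ptep_modify_prot_start()/commit().
			 */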
			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				if (pte_numa(ptent))
					ptent = pte_mknonnuma(ptent);
				ptent = pte_modify(ptent, newprot);
				/*
				 * Avoid taking write faults for pages we
				 * know to be dirty.
				 */
				if (dirty_accountable && pte_dirty(ptent))
					ptent = pte_mkwrite(ptent);
				ptep_modify_prot_commit(mm, addr, pte, ptent);
				updated = true;
			} else {
				struct page *page;

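				/*
				 * prot_numa: turn the pte into a NUMA
				 * hinting pte so the next access faults
				 * and the fault path can decide whether
				 * to migrate the page; KSM pages are
				 * skipped.
				 */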
				page = vm_normal_page(vma, addr, oldpte);
				if (page && !PageKsm(page)) {
					if (!pte_numa(oldpte)) {
						ptep_set_numa(mm, addr, pte);
						updated = true;
					}
				}
			}
			if (updated)
				pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

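/*
 * Walk the pmds mapped by one pud entry, splitting or updating any
 * transparent huge pmd encountered. The mmu notifier start/end calls
 * are deferred until the first populated pmd is seen.
 */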
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

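/*
 * Walk the puds mapped by one pgd entry, delegating populated ones to
 * change_pmd_range().
 */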
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

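/*
 * Walk an address range one pgd at a time. The TLB is flushed once at
 * the end, and only if some entries were actually modified.
 */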
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

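/*
 * Common entry point for mprotect() and NUMA hinting updates: hugetlb
 * VMAs take their own path, everything else goes through the page table
 * walk above. Returns the number of pages updated.
 */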
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

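/*
 * Apply @newflags to [start, end) within @vma: charge any additional
 * commit, try to merge with adjacent vmas, split when only part of the
 * vma is covered, then rewrite the page tables.
 */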
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

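/*
 * mprotect(2): change the protection of a page-aligned address range,
 * one vma at a time. Note that if an error occurs part-way through,
 * vmas earlier in the range keep their new protection.
 */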
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
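
/*
 * Illustrative userspace usage (not part of this file): map an
 * anonymous page, revoke write access (writes then SIGSEGV), and
 * restore it:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(p, 4096, PROT_READ);
 *	mprotect(p, 4096, PROT_READ | PROT_WRITE);
 */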