/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

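/*
 * Walk the ptes mapped by one pmd and apply newprot to each present
 * entry, returning the number of entries actually updated. Write
 * migration entries are downgraded to read; for prot_numa the zero,
 * KSM and already-protnone pages are skipped to avoid useless faults
 * and TLB flushes.
 */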
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			/*
			 * Avoid trapping faults against the zero or KSM
			 * pages. See similar comment in change_huge_pmd.
			 */
			if (prot_numa) {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (!page || PageKsm(page))
					continue;

				/* Avoid TLB flush if possible */
				if (pte_protnone(oldpte))
					continue;
			}

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/* Avoid taking write faults for known dirty pages */
			if (dirty_accountable && pte_dirty(ptent) &&
					(pte_soft_dirty(ptent) ||
					 !(vma->vm_flags & VM_SOFTDIRTY))) {
				ptent = pte_mkwrite(ptent);
			}
			ptep_modify_prot_commit(mm, addr, pte, ptent);
			pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult, so
				 * just be safe and disable write.
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

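/*
 * Walk the pmds covering [addr, end), splitting or updating transparent
 * huge pmds as needed and delegating the rest to change_pte_range().
 * The mmu notifier range is only opened once a populated pmd is found.
 * Returns the number of page-table entries updated.
 */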
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

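/*
 * Walk the puds covering [addr, end) and hand each populated one to
 * change_pmd_range(). Returns the number of page-table entries updated.
 */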
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

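/*
 * Top-level page-table walk for a non-hugetlb VMA: flush caches, mark a
 * TLB flush as pending, walk the pgds, then flush the TLB only if any
 * entries were actually changed. Returns the number of entries updated.
 */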
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

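/*
 * Entry point used by mprotect and the NUMA hinting code: hugetlb VMAs
 * take their own path, everything else goes through the generic
 * page-table walk above.
 */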
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

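/*
 * Apply newflags to the [start, end) slice of one VMA: account newly
 * writable private mappings, merge or split the VMA as needed, then
 * update vm_flags/vm_page_prot and rewrite the page tables. On success
 * *pprev points at the VMA covering the range.
 */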
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

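/*
 * mprotect(2): validate the arguments, then walk every VMA in
 * [start, start+len) and apply the new protection via mprotect_fixup().
 * Fails with -ENOMEM if the range contains an unmapped hole.
 */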
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}
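
/*
 * Illustrative sketch (not part of this file): how userspace reaches the
 * syscall above through the mprotect(2) libc wrapper. A minimal example
 * assuming a POSIX environment; the page size comes from sysconf() rather
 * than being hard-coded, and error handling is reduced to perror().
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = sysconf(_SC_PAGESIZE);
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED) {
 *			perror("mmap");
 *			return EXIT_FAILURE;
 *		}
 *		strcpy(p, "hello");
 *
 *		// Drop write permission; further writes raise SIGSEGV.
 *		if (mprotect(p, len, PROT_READ) < 0) {
 *			perror("mprotect");
 *			return EXIT_FAILURE;
 *		}
 *		printf("%s is now read-only\n", p);
 *		return EXIT_SUCCESS;
 *	}
 */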