/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * For a prot_numa update we only hold mmap_sem for read so there is a
 * potential race with faulting where a pmd was temporarily none. This
 * function checks for a transhuge pmd under the appropriate lock. It
 * returns a pte if it was successfully locked or NULL if it raced with
 * a transhuge insertion.
 */
static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, int prot_numa, spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}

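/*
 * Walk the ptes mapped by one pmd entry. For a normal update, apply
 * newprot to each present pte; for a prot_numa update, mark eligible
 * ptes pte_numa instead. Write-protected migration entries are
 * downgraded to read-only. Returns the number of ptes changed.
 */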
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				if (pte_numa(ptent))
					ptent = pte_mknonnuma(ptent);
				ptent = pte_modify(ptent, newprot);
				/*
				 * Avoid taking write faults for pages we
				 * know to be dirty.
				 */
				if (dirty_accountable && pte_dirty(ptent) &&
				    (pte_soft_dirty(ptent) ||
				     !(vma->vm_flags & VM_SOFTDIRTY)))
					ptent = pte_mkwrite(ptent);
				ptep_modify_prot_commit(mm, addr, pte, ptent);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page && !PageKsm(page)) {
					if (!pte_numa(oldpte)) {
						ptep_set_numa(mm, addr, pte);
						updated = true;
					}
				}
			}
			if (updated)
				pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

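/*
 * Walk the pmds mapped by one pud entry. Transparent huge pmds are
 * updated in place, or split when the range only partially covers
 * them; the mmu notifier is invoked lazily, once the first populated
 * pmd is found. Returns the number of ptes changed.
 */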
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;
	unsigned long mni_start = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		/* invoke the mmu notifier if the pmd is populated */
		if (!mni_start) {
			mni_start = addr;
			mmu_notifier_invalidate_range_start(mm, mni_start, end);
		}

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}

					/* huge pmd was handled */
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (mni_start)
		mmu_notifier_invalidate_range_end(mm, mni_start, end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

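/* Walk the puds mapped by one pgd entry, updating each populated range. */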
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

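/*
 * Walk the whole [addr, end) range: flush caches up front, mark a TLB
 * flush as pending so that concurrent paths can see it, and flush the
 * TLB at the end only if some entries were actually rewritten.
 */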
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

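/*
 * Change protections over [start, end), dispatching to the hugetlb
 * helper for hugetlb vmas and to the generic page table walk
 * otherwise. Returns the number of pages updated.
 */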
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);

	return pages;
}

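/*
 * Apply newflags to [start, end) within vma: charge newly writable
 * private pages against the commit limit, merge or split vmas as
 * needed, then rewrite the page tables via change_protection().
 */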
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

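/*
 * The mprotect(2) entry point: validate and page-align the request,
 * then walk every vma covering [start, start+len) and fix each one up
 * in turn under mmap_sem held for write.
 *
 * An illustrative (not kernel-internal) userspace call:
 *
 *	buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(buf, 4096, PROT_READ);
 */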
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts the VM_MAY* bits into the VM_* positions */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}