/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

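/*
 * Fallback for architectures that do not provide pgprot_modify():
 * just take the new protection bits unmodified.
 */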
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

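/*
 * Walk the ptes covered by one pmd entry and rewrite their protection
 * bits. When @prot_numa is set, present ptes are marked NUMA instead of
 * taking @newprot. Returns the number of entries actually changed so
 * callers can decide whether a TLB flush is needed.
 */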
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				if (pte_numa(ptent))
					ptent = pte_mknonnuma(ptent);
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				ptent = *pte;
				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					if (!pte_numa(oldpte)) {
						ptent = pte_mknuma(ptent);
						set_pte_at(mm, addr, pte, ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;

			/* Only !prot_numa always clears the pte */
			if (!prot_numa)
				ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

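/*
 * Walk the pmd entries under one pud. Transparent huge pages are either
 * changed in place via change_huge_pmd() or split before falling through
 * to the per-pte path.
 */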
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}
					continue;
				}
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

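/* Walk the pud entries under one pgd, accumulating changed-page counts. */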
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

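/*
 * Top-level page-table walk for one VMA range: flush caches up front,
 * walk pgd -> pud -> pmd -> pte, and flush the TLB only if at least one
 * entry was actually modified.
 */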
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}

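/*
 * Change protections over [start, end) of @vma, dispatching to the
 * hugetlb helper where needed. The walk is bracketed by mmu notifier
 * calls so secondary MMUs stay coherent. Returns the number of pages
 * whose protections changed.
 */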
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

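/*
 * Apply @newflags to the range [start, end) of @vma: account memory if a
 * private mapping becomes writable, merge or split VMAs as needed, then
 * rewrite the page-table protections.
 */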
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

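/*
 * sys_mprotect(): validate the arguments, resolve PROT_GROWSDOWN /
 * PROT_GROWSUP against the enclosing VMA, then walk the VMAs covering
 * [start, start + len) and apply mprotect_fixup() to each piece.
 *
 * Illustrative userspace sketch (not part of this file; names are the
 * standard libc wrappers):
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;
 *	mprotect(p, 4096, PROT_READ);	-- further writes now fault
 */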
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);

	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}