/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

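/*
 * Architectures may provide their own pgprot_modify() to carry
 * arch-specific bits over from the old protection; this generic
 * fallback simply adopts the new protection unchanged.
 */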
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

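/*
 * Update the ptes mapped by one pmd entry: either apply newprot or,
 * when prot_numa is set, mark suitable present ptes pte_numa so a
 * later fault can record a NUMA hint.  Returns how many entries were
 * updated.
 */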
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					/* only check non-shared pages */
					if (!pte_numa(oldpte) &&
					    page_mapcount(page) == 1) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;

			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult, so
				 * just be safe and disable write.
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

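/*
 * With CONFIG_NUMA_BALANCING the pmd entry itself is also marked
 * pmd_numa after its ptes have been processed; without it this path
 * must never be reached, hence the BUG() stub.
 */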
#ifdef CONFIG_NUMA_BALANCING
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */

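/*
 * Walk the pmds below one pud entry.  A transparent huge pmd is
 * changed in one go when it spans the whole range, otherwise it is
 * split and handled pte by pte.
 */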
static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma->vm_mm, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
				pages += HPAGE_PMD_NR;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);

		if (prot_numa)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	return pages;
}

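/*
 * Walk the puds below one pgd entry, delegating each populated slot
 * to change_pmd_range() and accumulating the count of changes.
 */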
static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

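/*
 * Top-level walk over one vma range: flush the cache up front, change
 * every populated pgd slot, and flush the TLB only if some entry was
 * actually rewritten.
 */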
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}

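/*
 * Change protection over [start, end), dispatching hugetlb vmas to
 * their own helper and notifying secondary MMUs (via mmu notifiers)
 * that the range is being invalidated.
 */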
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
				dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

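/*
 * Apply newflags to the stretch [start, end) of vma: charge newly
 * writable private mappings against the commit limit, merge or split
 * vmas so the range is exactly covered, then rewrite the page
 * protections.
 */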
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

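/*
 * The mprotect(2) entry point: validate the request, then walk and
 * fix up every vma the range touches.  A minimal userspace sketch
 * (illustrative only, not part of this file):
 *
 *	buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	mprotect(buf, 4096, PROT_READ);
 */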
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}