/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/ksm.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

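/*
 * An architecture may provide its own pgprot_modify() to preserve bits
 * of the old protection when the flags change (x86, for instance, keeps
 * its page-attribute bits this way); the generic fallback below simply
 * adopts newprot as-is.
 */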
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

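/*
 * Walk the ptes mapped by one pmd and apply newprot to each present
 * entry, returning how many entries were actually updated so callers
 * can skip the TLB flush when nothing changed. With prot_numa set,
 * present non-KSM pages are instead marked as pte_numa entries for
 * NUMA-hinting faults; writable migration swap entries are downgraded
 * to read-only either way.
 */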
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				if (pte_numa(ptent))
					ptent = pte_mknonnuma(ptent);
				ptent = pte_modify(ptent, newprot);
				/*
				 * Avoid taking write faults for pages we
				 * know to be dirty.
				 */
				if (dirty_accountable && pte_dirty(ptent))
					ptent = pte_mkwrite(ptent);
				ptep_modify_prot_commit(mm, addr, pte, ptent);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page && !PageKsm(page)) {
					if (!pte_numa(oldpte)) {
						ptep_set_numa(mm, addr, pte);
						updated = true;
					}
				}
			}
			if (updated)
				pages++;
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

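/*
 * Walk the pmds below one pud. A transparent huge pmd that spans the
 * whole HPAGE_PMD_SIZE range is changed in one shot by change_huge_pmd();
 * one that is only partially covered is split first and then handled as
 * ordinary ptes by change_pte_range().
 */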
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	unsigned long nr_huge_updates = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR) {
						pages += HPAGE_PMD_NR;
						nr_huge_updates++;
					}
					continue;
				}
			}
			/* fall through, the trans huge pmd just split */
		}
		VM_BUG_ON(pmd_trans_huge(*pmd));
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	if (nr_huge_updates)
		count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
	return pages;
}

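/* Walk the puds below one pgd, skipping holes, and sum the updates. */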
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

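/*
 * Top level of the page-table walk for regular (non-hugetlb) VMAs.
 * The pending-TLB-flush window is opened around the whole walk so that
 * other page-table users can tell a flush is coming, and the range is
 * flushed once at the end, and only if some entry actually changed.
 */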
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}

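/*
 * Entry point shared by mprotect() and the NUMA-balancing code.
 * Brackets the update with mmu_notifier invalidate calls and diverts
 * hugetlb VMAs to hugetlb_change_protection(). Returns the number of
 * page table entries that were updated.
 */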
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

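/*
 * Apply newflags to the [start, end) slice of one VMA: charge any newly
 * writable private memory against the commit limit, merge or split the
 * VMA as needed, then rewrite the page protections. Called with
 * mmap_sem held for write; *pprev is updated for the caller's walk.
 */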
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

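/*
 * mprotect(2): change access protections on a region of page-aligned
 * memory. start must be page aligned; len is rounded up to a whole
 * number of pages. A sketch of typical userspace usage, illustrative
 * only:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;
 *	mprotect(p, 4096, PROT_READ);	(writes to p now fault)
 */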
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
329
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
L
Linus Torvalds 已提交
330 331 332 333 334 335
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the position of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}