/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

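/*
 * Fallback for architectures that do not supply their own pgprot_modify():
 * just take the new protection bits as-is.
 */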
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

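/*
 * Walk the ptes mapped by one pmd entry and apply the new protection.  With
 * prot_numa, present non-shared pages are instead marked pte_numa so they
 * trigger NUMA hinting faults; *ret_all_same_node reports whether every
 * examined page sat on the same node.  Returns the number of entries updated.
 */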
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa, bool *ret_all_same_node)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	bool all_same_node = true;
	int last_nid = -1;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					int this_nid = page_to_nid(page);
					if (last_nid == -1)
						last_nid = this_nid;
					if (last_nid != this_nid)
						all_same_node = false;

					/* only check non-shared pages */
					if (!pte_numa(oldpte) &&
					    page_mapcount(page) == 1) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	*ret_all_same_node = all_same_node;
	return pages;
}

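/*
 * Mark the pmd entry itself pmd_numa so a whole pmd's worth of ptes can be
 * handled with a single NUMA hinting fault.  Only available with
 * CONFIG_NUMA_BALANCING; the stub below must never be reached.
 */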
#ifdef CONFIG_NUMA_BALANCING
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */

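/*
 * Walk the pmd entries covered by one pud entry, splitting or updating
 * transparent huge pages as needed.  Returns the number of pages changed.
 */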
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	bool all_same_node;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR)
						pages++;

					continue;
				}
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa, &all_same_node);

		/*
		 * If we are changing protections for NUMA hinting faults then
		 * set pmd_numa if the examined pages were all on the same
		 * node. This allows a regular PMD to be handled as one fault
		 * and effectively batches the taking of the PTL
		 */
		if (prot_numa && all_same_node)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	return pages;
}

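/*
 * Walk the pud entries covered by one pgd entry.  Returns the number of
 * pages changed.
 */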
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

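/*
 * Walk the page tables for [addr, end) within one vma and apply newprot,
 * flushing the TLB only if any entries were actually modified.
 */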
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}

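/*
 * Change protection over [start, end), dispatching to the hugetlb helper for
 * hugetlb vmas and wrapping the update in mmu notifier calls.  Returns the
 * number of pages changed.
 */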
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

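/*
 * Apply newflags to [start, end) within one vma: charge newly writable
 * private mappings against the commit limit, merge or split vmas as needed,
 * recompute vm_page_prot and finally rewrite the page tables.
 */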
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

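	/*
	 * If the mapping wants write-notification, keep its ptes
	 * write-protected so the first write faults, but let
	 * change_protection() re-enable write for ptes that are
	 * already dirty.
	 */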
	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

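/*
 * mprotect(2): change the protection of [start, start + len).  start must be
 * page aligned, len is rounded up to a page boundary, and every vma in the
 * range is fixed up in turn.
 */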
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}