/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

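/*
 * Fallback for architectures that do not supply pgprot_modify(): simply
 * take the new protection wholesale, as there are no arch-private bits
 * in the pgprot that need preserving.
 */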
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

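/*
 * Walk the ptes under one pmd and apply the new protection, returning
 * the number of entries updated. *ret_all_same_node tells the caller
 * whether every page examined was found on the same NUMA node.
 */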
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa, bool *ret_all_same_node)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	bool all_same_node = true;
	int last_nid = -1;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
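			/*
			 * A plain protection change applies the caller's
			 * newprot. A prot_numa pass leaves newprot alone and
			 * instead marks suitable ptes pte_numa, so the next
			 * access takes a NUMA hinting fault.
			 */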
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					int this_nid = page_to_nid(page);
					if (last_nid == -1)
						last_nid = this_nid;
					if (last_nid != this_nid)
						all_same_node = false;

					/* only check non-shared pages */
					if (!pte_numa(oldpte) &&
					    page_mapcount(page) == 1) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	*ret_all_same_node = all_same_node;
	return pages;
}

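/*
 * Mark the pmd itself pmd_numa, so that a NUMA hinting fault on any page
 * beneath it can be recognised and handled at pmd granularity.
 */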
#ifdef CONFIG_NUMA_BALANCING
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */

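/*
 * Walk the pmds under one pud: split or update transparent huge pmds in
 * place where possible, otherwise descend to the pte level.
 */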
static inline unsigned long change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	bool all_same_node;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma->vm_mm, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot, prot_numa)) {
				pages += HPAGE_PMD_NR;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa, &all_same_node);

		/*
		 * If we are changing protections for NUMA hinting faults then
		 * set pmd_numa if the examined pages were all on the same
		 * node. This allows a regular PMD to be handled as one fault
		 * and effectively batches the taking of the PTL
		 */
		if (prot_numa && all_same_node)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	return pages;
}

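/*
 * Walk the puds under one pgd, descending into any that are populated.
 */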
static inline unsigned long change_pud_range(struct vm_area_struct *vma, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

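/*
 * Top of the page-table walk: flush the cache range up front, walk the
 * pgd entries, and flush the TLB afterwards only if entries changed.
 */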
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}

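/*
 * Apply newprot across [start, end), bracketing the update with mmu
 * notifier invalidation and dispatching hugetlb vmas to their own
 * helper. Returns the number of pages whose protection changed.
 */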
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

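/*
 * Apply newflags to [start, end) of one vma: charge memory as needed,
 * merge or split vmas so the range gets a vma of its own, then rewrite
 * the page tables. If the mapping wants write notification, the ptes
 * stay read-only and dirty_accountable lets already-dirty ptes be made
 * writable without taking a further fault.
 */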
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

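/*
 * Example (userspace): make one anonymous page read-only. mprotect()
 * returns 0 on success and -1 with errno set on failure.
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (mprotect(p, 4096, PROT_READ))
 *		perror("mprotect");
 */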
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

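	/*
	 * Walk the vmas covering [start, end), applying the new flags one
	 * vma at a time; any hole in the range yields -ENOMEM.
	 */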
	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}