/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

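/*
 * Architectures that encode extra state in vm_page_prot (x86 keeps its
 * cache-attribute bits there, for instance) supply their own
 * pgprot_modify() to carry those bits over; this generic fallback just
 * adopts the new protection wholesale.
 */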
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

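/*
 * Update the ptes mapped by one pmd entry over [addr, end). For a
 * plain protection change every present pte is rewritten with newprot;
 * with prot_numa set, present ptes are instead marked with pte_mknuma()
 * so the next access traps as a NUMA hinting fault. Write migration
 * entries are downgraded to read, since checking the protection there
 * is hard. Returns the number of entries updated.
 */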
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					if (!pte_numa(oldpte)) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				if (pte_swp_soft_dirty(oldpte))
					newpte = pte_swp_mksoft_dirty(newpte);
				set_pte_at(mm, addr, pte, newpte);

				pages++;
			}
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}

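/*
 * Update one pud entry's worth of pmds. A transparent huge pmd is
 * changed in place by change_huge_pmd() when the range spans the whole
 * huge page, and split first otherwise; regular pmds fall through to
 * change_pte_range().
 */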
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR)
						pages++;

					continue;
				}
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);

	return pages;
}

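/*
 * Update one pgd entry's worth of puds, delegating each populated pud
 * to change_pmd_range().
 */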
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pud_t *pud;
	unsigned long next;
	unsigned long pages = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		pages += change_pmd_range(vma, pud, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pud++, addr = next, addr != end);

	return pages;
}

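/*
 * Top level of the page-table walk: flush the caches up front, visit
 * every populated pgd entry, and flush the TLB at the end only if some
 * entry was actually modified.
 */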
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				 dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}

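/*
 * Change the protection on [start, end) of @vma to @newprot, keeping
 * secondary MMUs in sync via the mmu notifiers. hugetlb mappings take
 * their own dedicated path. Returns the number of entries updated.
 */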
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pages;

	mmu_notifier_invalidate_range_start(mm, start, end);
	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages;
}

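/*
 * Apply @newflags to [start, end) within @vma: charge the commit when a
 * private mapping becomes writable, merge with neighbours or split the
 * vma so the range is covered exactly, then rewrite the page tables.
 * The caller must hold mmap_sem for writing.
 */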
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mappings were accounted for
	 * even if read-only, so there is no need to account for them here.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}

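/*
 * The mprotect(2) entry point: validate the arguments, then walk the
 * vmas covering [start, start+len) and apply mprotect_fixup() to each
 * piece, honouring PROT_GROWSDOWN and PROT_GROWSUP for stack vmas.
 */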
SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
		unsigned long, prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	} else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shifts VM_MAY% into the place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}