#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

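/*
 * User pte pages may be allocated from highmem when CONFIG_HIGHPTE is
 * enabled; __userpte_alloc_gfp carries the gfp flags used for them and
 * can be trimmed with the "userpte=" boot option (see setup_userpte()).
 */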
#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

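/*
 * Page table pages are freed through the mmu_gather batching below, so
 * they are not reused until the relevant TLB entries have been flushed;
 * the paravirt_release_*() hooks let hypervisor backends know the page
 * is no longer a page table.
 */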
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)


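/*
 * A pgd page is never mapped by user vmas, so its struct page fields
 * are free for other use: stash the owning mm in page->index so that
 * walkers of pgd_list can get back to the mm via pgd_page_get_mm().
 */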
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

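/*
 * Preallocate one pmd page per slot; this is all-or-nothing, so a
 * failed allocation releases everything allocated so far and returns
 * -ENOMEM.
 */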
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);

		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

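/*
 * Wire the preallocated pmds into the new pgd.  Kernel-space slots are
 * first filled with a copy of the corresponding swapper_pg_dir pmd
 * entries so the new mm sees the current kernel mappings.
 */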
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

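/*
 * Allocate and set up a new pgd: the pgd page itself, the preallocated
 * pmds and any paravirt state, with the population done under pgd_lock
 * so that pgd_list walkers never see a partially constructed pgd.
 */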
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

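/*
 * Tear down a pgd: give back any preallocated pmds still attached to
 * it, unlink it from pgd_list (via pgd_dtor()) and free the page.
 */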
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

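/*
 * Update a pte's access/dirty bits on behalf of the generic mm code.
 * The entry is only rewritten, and the single TLB entry flushed, when
 * it actually changed and the access was a dirtying one; the return
 * value tells the caller whether anything changed.
 */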
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
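/*
 * Transparent-hugepage counterpart of ptep_set_access_flags(): the same
 * logic applied to a huge pmd, flushing the whole huge page range.
 */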
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}

	return changed;
}
#endif

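/*
 * Test and clear the accessed bit in a pte, e.g. for page aging.  The
 * paravirt pte_update() hook is only called when the bit was actually
 * cleared.
 */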
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &pmdp->pmd);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

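/*
 * Like ptep_test_and_clear_young(), but also flush the TLB entry so the
 * hardware will set the accessed bit again on the next reference.
 */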
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

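/*
 * Mark a huge pmd as being split.  Only the first caller to set the
 * splitting bit notifies paravirt and flushes; as the comment below
 * notes, the flush exists purely to serialize against gup-fast.
 */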
void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)&pmdp->pmd);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

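/*
 * Number of fixmap slots populated so far; reserve_top_address() must
 * run before any of them are set.
 */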
int fixmaps_set;

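/* Install @pte at fixmap slot @idx and account for it in fixmaps_set. */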
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}