#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

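/*
 * Allocate one zeroed page of ptes for a kernel mapping.  __GFP_ZERO
 * yields a table whose entries are all "not present"; __GFP_REPEAT
 * asks the allocator to retry harder, since callers have no good way
 * to recover from a failed page-table allocation.
 */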
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

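/*
 * Tear down a pte page via the mmu_gather: undo pgtable_page_ctor(),
 * tell any paravirt backend the page is no longer a page table, and
 * let tlb_remove_page() defer the actual free until after the TLB
 * flush.
 */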
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

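/*
 * pmd and pud pages are freed the same way, each with its matching
 * paravirt release hook; they only exist on builds with more than two
 * (respectively three) page-table levels.
 */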
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

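/* pgd_list links all pgd pages through page->lru; writers hold pgd_lock. */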
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

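/*
 * Number of pgd entries private to each pgd (not shared with
 * swapper_pg_dir): with a shared kernel pmd only the user portion
 * below KERNEL_PGD_BOUNDARY is private, otherwise every entry is.
 */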
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_ctor(pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
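/*
 * With PAE's four top-level (PDPT) entries this preallocates at most
 * four pmd pages, one per GiB of virtual address space.
 */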

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

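/*
 * preallocate_pmds()/free_pmds() manage the PREALLOCATED_PMDS pmd
 * pages as an all-or-nothing set, so pgd_alloc() never has to cope
 * with a partially allocated batch.
 */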
static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

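/*
 * Wire the preallocated pmd pages into a fresh pgd.  Slots at or above
 * KERNEL_PGD_BOUNDARY are first filled with a copy of the kernel pmd
 * entries from swapper_pg_dir, so the kernel half of the new address
 * space is usable immediately.
 */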
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

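/*
 * Allocate and set up a new pgd.  The ordering matters: mm->pgd is set
 * before the paravirt hook runs, and the ctor/prepopulate step happens
 * under pgd_lock so that walkers of pgd_list never observe a
 * half-constructed pgd.
 */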
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

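/*
 * Used by the generic mm code to upgrade a pte's access bits.  Only a
 * dirtying fault rewrites the entry and flushes the TLB here; other
 * transitions are left to be fixed up lazily (on x86 the CPU sets the
 * accessed bit itself).
 */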
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

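/*
 * Atomically clear the accessed bit and report whether it was set;
 * pte_update() lets a paravirt backend see the change.
 */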
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

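/*
 * As above, but also flush the stale TLB entry when the pte was young,
 * so the hardware starts accumulating the accessed bit afresh.
 */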
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
#endif
}

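/*
 * Number of fixmap ptes installed so far; reserve_top_address() must
 * run before any of them are set.
 */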
int fixmaps_set;

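/*
 * Install the pte for a compile-time fixmap slot.  A usage sketch
 * (uart_phys is a placeholder, and the slot name assumes an earlycon
 * fixmap exists in this configuration):
 *
 *	native_set_fixmap(FIX_EARLYCON_MEM_BASE,
 *			  uart_phys, PAGE_KERNEL_NOCACHE);
 */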
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}