#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

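/*
 * Allocate a zeroed page to hold kernel PTEs.  __GFP_REPEAT asks the
 * page allocator to retry harder, since a failure here is hard for
 * callers to recover from.
 */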
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

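/*
 * Allocate a PTE page for userspace pagetables.  With CONFIG_HIGHPTE
 * the page may come from highmem; pgtable_page_ctor() sets up the
 * struct page (split pagetable lock, pagetable accounting) before the
 * page is used.
 */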
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

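/*
 * Free a PTE page via the mmu_gather, so it is not reused before the
 * TLB has been flushed.  The paravirt hook tells the hypervisor the
 * page will no longer be used as a pagetable.
 */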
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

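/* pmd and pud pages are freed the same way on 3- and 4-level pagetables. */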
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

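/*
 * All pgds are kept on pgd_list, linked through page->lru, so that
 * updates to the kernel part of the pagetables can be propagated to
 * every pgd.  The list is protected by pgd_lock.
 */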
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

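/*
 * Number of pgd entries that must be populated per pgd: just the
 * usermode portion if the kernel pmd is shared, all of them otherwise.
 */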
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, KERNEL_PGD_BOUNDARY*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */

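/*
 * Allocate and initialize a pgd: the constructor copies in the kernel
 * mappings, then (on PAE) the pmds are pre-populated.  On failure the
 * partially constructed pgd is torn down again and NULL is returned.
 */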
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pmd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}

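/*
 * Set the accessed/dirty bits of a PTE after a fault.  The entry is
 * written (and the TLB entry flushed) only if it changed and @dirty
 * is set; returns whether *ptep differed from @entry.
 */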
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

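/*
 * Atomically test and clear the accessed bit, telling paravirt
 * backends about the update via pte_update().  Returns the old
 * accessed state.
 */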
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

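/*
 * Like ptep_test_and_clear_young(), but also flushes the TLB entry so
 * the hardware can mark the page accessed again; used for page aging.
 */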
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

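/*
 * Fixmaps: PTEs installed at compile-time-fixed virtual addresses.
 * fixmaps_set counts how many fixmap slots have been populated.
 */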
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, unsigned long phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}