#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

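/*
 * Allocate a zeroed page-table page.  pte_alloc_one() returns a struct
 * page for user ptes (from highmem if CONFIG_HIGHPTE is set) and runs
 * the pgtable_page ctor; __GFP_REPEAT asks the allocator to retry
 * harder before failing.
 */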
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

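/*
 * Free a user pte page via the mmu_gather: the actual page release is
 * batched and deferred until after the TLB flush, so no CPU can still
 * be walking the table when the page is reused.
 */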
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

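/*
 * Higher pagetable levels are freed the same way: notify the paravirt
 * backend, then hand the page to the mmu_gather for deferred freeing.
 */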
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

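/*
 * All pgds are linked on pgd_list via their struct page, so that
 * updates to the kernel part of the pagetables can be propagated to
 * every pgd (see the pgd_list comment further down).
 */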
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

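/*
 * Number of pgd entries that are private to each pagetable rather than
 * shared with swapper_pg_dir: with a shared kernel pmd only the user
 * range below KERNEL_PGD_BOUNDARY, otherwise the whole pgd.
 */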
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;

	/*
	 * If the pgd points to a shared pagetable level (either the
	 * ptes in non-PAE, or shared PMD in PAE), then just copy the
	 * references from swapper_pg_dir.
	 */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);
}

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/*
	 * Note: almost everything apart from _PAGE_PRESENT is
	 * reserved at the pmd (PDPT) level.
	 */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

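/*
 * Preallocate the whole set of pmd pages in one go, so that pgd_alloc()
 * either gets a complete set or cleanly fails with -ENOMEM.
 */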
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

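/*
 * Hook the preallocated pmds into the pgd.  Entries covering the kernel
 * half of the address space are first filled from the corresponding
 * swapper_pg_dir pmds.
 */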
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

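/*
 * Allocate a fresh pgd: clone the kernel mappings into it, hook it onto
 * pgd_list if needed, and (on PAE) install the preallocated pmds.  The
 * ctor and prepopulation run under pgd_lock so that pgd_list walkers
 * never observe a partially populated pgd.
 */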
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

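/*
 * Set the accessed/dirty bits in a pte on behalf of the generic fault
 * path.  The pte is only rewritten (and the stale TLB entry flushed)
 * when it actually changed and the access was a write; the hardware
 * sets the accessed bit on its own.
 */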
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

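/*
 * Atomically test and clear the accessed bit.  The paravirt pte_update
 * hook is only called when the bit was actually set.
 */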
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

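/*
 * Same as above, but also flush the TLB entry when the pte was young,
 * so that the CPU will mark the pte accessed again on the next touch.
 */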
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

int fixmaps_set;	/* number of fixmap slots populated so far */

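/*
 * Install a fixmap entry: __native_set_fixmap() takes a ready-made pte,
 * native_set_fixmap() builds one from a physical address and protection
 * bits.  Out-of-range indices are a hard bug.
 */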
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, unsigned long phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}