#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>

#include <asm/cache.h>

/* Allocate the top level pgd (page directory)
 *
 * Here (for 64-bit kernels) we implement a hybrid L2/L3 scheme: we
 * allocate the first pmd adjacent to the pgd.  This means that we can
 * subtract a constant offset to get to it.  The pmd and pgd sizes are
 * arranged so that a single pmd covers 4GB (giving a full 64-bit
 * process access to 8TB) so our lookups are effectively L2 for the
 * first 4GB of the kernel (i.e. for all ILP32 processes and all the
 * kernel for machines with under 4GB of memory) */
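/* Worked example of the numbers above (an illustrative sketch only,
 * assuming 4 kB pages, order-1 (two-page) pmd and pgd tables, 8-byte
 * pte entries and 4-byte pmd/pgd entries): a pte page then maps
 * 512 * 4 kB = 2 MB, a pmd holds 2048 entries covering
 * 2048 * 2 MB = 4 GB, and a pgd holds 2048 entries covering
 * 2048 * 4 GB = 8 TB. */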
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
					       PGD_ALLOC_ORDER);
	pgd_t *actual_pgd = pgd;

	if (likely(pgd != NULL)) {
		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#if CONFIG_PGTABLE_LEVELS == 3
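		/* The single allocation is laid out as [first pmd][pgd]:
		 * the pgd handed back to the caller starts PTRS_PER_PGD
		 * entries past the base, so the attached pmd can always be
		 * reached by subtracting that constant (see pgd_free()). */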
		actual_pgd += PTRS_PER_PGD;
		/* Populate first pmd with allocated memory.  We mark it
		 * with PxD_FLAG_ATTACHED as a signal to the system that this
		 * pmd entry may not be cleared. */
		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT | 
				        PxD_FLAG_VALID | 
					PxD_FLAG_ATTACHED) 
			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
		 * a signal that this pmd may not be freed */
		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
	}
	return actual_pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#if CONFIG_PGTABLE_LEVELS == 3
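	/* pgd_alloc() returned a pointer PTRS_PER_PGD entries into the
	 * combined pmd+pgd allocation; step back to its base before
	 * freeing the whole thing. */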
	pgd -= PTRS_PER_PGD;
#endif
	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
}

#if CONFIG_PGTABLE_LEVELS == 3

/* Three Level Page Table Support for pmd's */
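/* Each pgd/pmd entry stores the PxD_FLAG_* bits plus the physical address
 * of the next-level table shifted right by PxD_VALUE_SHIFT, so the whole
 * value fits in 32 bits (hence the __u32 casts below). */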

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
		        (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
					       PMD_ORDER);
	if (pmd)
		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
	return pmd;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
		/*
		 * This is the permanent pmd attached to the pgd;
		 * cannot free it.
		 * Increment the counter to compensate for the decrement
		 * done by generic mm code.
		 */
		mm_inc_nr_pmds(mm);
		return;
	}
	free_pages((unsigned long)pmd, PMD_ORDER);
}

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#if CONFIG_PGTABLE_LEVELS == 3
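	/* The "gateway marker" is PxD_FLAG_ATTACHED, which pgd_alloc()
	 * set on the first entry of the permanent pmd carved out next to
	 * the pgd; it must be carried over when the entry is populated. */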
	/* preserve the gateway marker if this is the beginning of
	 * the permanent pmd */
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
				 PxD_FLAG_VALID |
				 PxD_FLAG_ATTACHED) 
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
	else
#endif
		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) 
			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

static inline pgtable_t
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	if (!page)
		return NULL;
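	/* pgtable_page_ctor() initialises the bookkeeping for a user
	 * page-table page (e.g. the split page-table lock); if that
	 * fails, the page is handed straight back. */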
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return page;
}

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	return pte;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	pte_free_kernel(mm, page_address(pte));
}

#define check_pgt_cache()	do { } while (0)

#endif