#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

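/*
 * Book-keeping for the pages that back the virtual memmap: each node
 * records one backing allocation (physical and virtual address) on a
 * singly linked list so it can be found again, e.g. on memory
 * hot-unplug.
 */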
struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})
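/*
 * Illustrative sketch (hypothetical variable names): since every page
 * table is aligned enough to leave the low bits of its pointer clear,
 * and MAX_PGTABLE_INDEX_SIZE is one less than a power of two, an
 * index size can ride in those low bits:
 *
 *	unsigned long pgf = (unsigned long)table | shift;	// pack
 *	table = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);	// pointer
 *	shift = pgf & MAX_PGTABLE_INDEX_SIZE;			// index size
 *
 * pgtable_free_tlb() uses this kind of encoding to defer frees.
 */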

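/*
 * PTE pages are handed out as sub-page "fragments".  The final int
 * argument to pte_fragment_alloc()/pte_fragment_free() selects kernel
 * (1) vs user (0) page tables; user fragments also go through the
 * pgtable page ctor/dtor for the split PTE lock.
 */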
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern void pte_fragment_free(unsigned long *, int);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif

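/*
 * The radix PGD is 64K: a single page with 64K pages, an order-4
 * allocation with 4K pages.  __GFP_RETRY_MAYFAIL asks the allocator
 * to try hard for that larger request without invoking the OOM
 * killer.
 */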
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
				4);
	if (!page)
		return NULL;
	return (pgd_t *) page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

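/*
 * The generic pgd_alloc()/pgd_free() pick the MMU-appropriate path at
 * runtime: radix PGDs are raw page allocations, hash PGDs come from
 * the PGT_CACHE kmem_cache.
 */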
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	if (unlikely(!pgd))
		return pgd;
	memset(pgd, 0, PGD_TABLE_SIZE);

	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

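/*
 * The *_populate() helpers store the physical address of the
 * next-level table (__pgtable_ptr_val()) plus per-level *_VAL_BITS
 * tag bits in the entry.
 */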
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_set(pgd, __pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, __pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

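/*
 * Radix hardware caches intermediate levels of the tree in its page
 * walk cache, so a table that is about to be freed must first be
 * flushed from that cache via flush_tlb_pgtable().
 */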
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                  unsigned long address)
{
	/*
	 * By now all the pud entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
				pgtable_gfp_flags(mm, GFP_KERNEL));
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                  unsigned long address)
{
	/*
	 * By now all the pmd entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, __pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	pmd_set(pmd, __pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

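/*
 * pgtable_t is the kernel virtual address of a PTE fragment here, not
 * a struct page pointer, so recovering it from a pmd is simply
 * pmd_page_vaddr().
 */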
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC_4K_PAGES
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	struct page *page;
	pte_t *pte;

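	/* User PTE pages are charged to the mm's cgroup via __GFP_ACCOUNT */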
	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
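	/* Set up the split PTE lock / page-table accounting; this can fail */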
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	return pte;
}
#else /* if CONFIG_PPC_64K_PAGES */

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return (pte_t *)pte_fragment_alloc(mm, address, 1);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
				      unsigned long address)
{
	return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
#endif

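/*
 * pte_fragment_free() drops a reference on the fragment's backing
 * page; the page itself is only released when its last fragment goes.
 */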
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	pte_fragment_free((unsigned long *)pte, 1);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pte_fragment_free((unsigned long *)ptepage, 0);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	/*
	 * By now all the pte entries should be none entries. So go
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
	pgtable_free_tlb(tlb, table, 0);
	pgtable_free_tlb(tlb, table, 0);
}

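/* No arch cache of freed page tables to shrink on book3s64 */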
#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */