/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
5
#include <linux/highmem.h>
I
Ingo Molnar 已提交
6
#include <linux/bootmem.h>
L
Linus Torvalds 已提交
7
#include <linux/module.h>
8
#include <linux/sched.h>
L
Linus Torvalds 已提交
9
#include <linux/slab.h>
10 11
#include <linux/mm.h>

I
Ingo Molnar 已提交
12 13 14 15 16 17 18 19
/*
 * Write back and invalidate every cache line that covers the range
 * [addr, addr + size), stepping by the CPU's reported clflush stride.
 */
void clflush_cache_range(void *addr, int size)
{
	int off;

	for (off = 0; off < size; off += boot_cpu_data.x86_clflush_size)
		clflush((char *)addr + off);
}

L
Linus Torvalds 已提交
20 21
#include <asm/processor.h>
#include <asm/tlbflush.h>
D
Dave Jones 已提交
22
#include <asm/sections.h>
23 24
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
L
Linus Torvalds 已提交
25

I
Ingo Molnar 已提交
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46
/*
 * We allow the BIOS range to be executable:
 */
#define BIOS_BEGIN		0x000a0000
#define BIOS_END		0x00100000

/*
 * Sanitize the NX bit in @prot for a mapping of @address: the legacy
 * BIOS area must stay executable, and kernel text must never become NX.
 */
static inline pgprot_t check_exec(pgprot_t prot, unsigned long address)
{
	unsigned long phys = __pa(address);

	if (phys >= BIOS_BEGIN && phys < BIOS_END)
		pgprot_val(prot) &= ~_PAGE_NX;

	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext.
	 */
	BUG_ON((pgprot_val(prot) & _PAGE_NX) &&
	       address >= (unsigned long)&_text &&
	       address < (unsigned long)&_etext);

	return prot;
}

47
pte_t *lookup_address(unsigned long address, int *level)
48
{
L
Linus Torvalds 已提交
49 50 51
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
52

T
Thomas Gleixner 已提交
53 54
	*level = PG_LEVEL_NONE;

L
Linus Torvalds 已提交
55 56 57 58 59 60 61 62
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
T
Thomas Gleixner 已提交
63 64

	*level = PG_LEVEL_2M;
L
Linus Torvalds 已提交
65 66 67
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

T
Thomas Gleixner 已提交
68
	*level = PG_LEVEL_4K;
69 70 71
	return pte_offset_kernel(pmd, address);
}

I
Ingo Molnar 已提交
72
/*
 * Install @pte at @kpte in the init_mm page tables and, on 32 bit
 * when the kernel pmd is not shared, mirror the update into every
 * pgd on pgd_list so all page directories see the change.
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		/*
		 * Each pgd has its own copy of the kernel pmds here
		 * (presumably the PAE case — confirm), so propagate the
		 * update to all of them. The list is chained through
		 * page->index.
		 */
		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

94
/*
 * Split the large (2M) mapping containing @address into 4K ptes that
 * carry the same protections, and install the new page table.
 *
 * Returns 0 on success — including when another CPU won the race and
 * split the page for us — or -ENOMEM if no page could be allocated.
 */
static int split_large_page(pte_t *kpte, unsigned long address)
{
	/* The 4K ptes inherit the large page's protections, minus PSE: */
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* With DEBUG_PAGEALLOC sleeping allocations are not safe here. */
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	/* One 4K pte per frame of the former 2M page: */
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable:
	 *
	 * NOTE(review): 'address' holds a physical address at this point
	 * (overwritten by __pa() above) but __set_pmd_pte() indexes page
	 * directories with it as if virtual — verify this is intended.
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	/* Ownership of 'base' moved into the page table; don't free it. */
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	/* Only reached with a live 'base' on the race path above. */
	if (base)
		__free_pages(base, 0);

	return 0;
}

147
/*
 * Set the protection of the single 4K page mapping @address to @prot,
 * installing @pfn as the frame.
 *
 * If @address is currently covered by a large (2M) mapping, the large
 * page is split first and the lookup is retried at 4K granularity.
 *
 * Returns 0 on success, -EINVAL if @address has no mapping, or the
 * error from split_large_page(). check_exec() BUG()s if @prot would
 * make kernel text non-executable.
 */
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	/* Only the lowmem direct mapping can be changed on 32 bit. */
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	/* Page-table pages are never LRU or compound; catch corruption. */
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	/* Keep the BIOS range executable, refuse NX on kernel text: */
	prot = check_exec(prot, address);

	if (level == PG_LEVEL_4K) {
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* 2M mapping: split it, then retry the lookup at 4K. */
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}
L
Linus Torvalds 已提交
178

179 180 181 182 183
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	/*
	 * Addresses in the high kernel text mapping are redirected to
	 * their direct-mapping alias; remember where we came from.
	 */
	if (address >= __START_KERNEL_map &&
			address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		/*
		 * NOTE(review): for kernel-map addresses the direct mapping
		 * is only touched when @prot is a present protection —
		 * confirm the intent of this condition against callers.
		 */
		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle kernel mapping too which aliases part of
		 * lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2);
		}
#endif
	}

	return err;
}

233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253
/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * Use this when pages are mapped somewhere with a caching policy other
 * than write-back - some CPUs do not like aliases with conflicting
 * caching policies. The attributes in the kernel linear mapping are
 * changed as well.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings
 * elsewhere (e.g. in user space) - this function only deals with the
 * kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	return change_page_attr_addr((unsigned long)page_address(page),
				     numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
I
Ingo Molnar 已提交
260 261 262 263 264 265 266 267 268 269 270 271 272 273 274

/* IPI callback: flush the TLB (and caches) on one CPU. */
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush everything rather than single entries, to work around
	 * errata in early Athlons regarding large page flushing.
	 */
	__flush_tlb_all();

	/* Write back and invalidate the caches too on model >= 4 parts. */
	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
L
Linus Torvalds 已提交
275 276
	BUG_ON(irqs_disabled());

I
Ingo Molnar 已提交
277
	on_each_cpu(flush_kernel_map, NULL, 1, 1);
278
}
279
EXPORT_SYMBOL(global_flush_tlb);
L
Linus Torvalds 已提交
280 281 282 283 284 285

#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * Map or unmap @numpages pages starting at @page in the kernel linear
 * mapping, so use-after-free of freed pages faults immediately.
 */
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* Highmem pages are not part of the kernel linear mapping. */
	if (PageHighMem(page))
		return;

	/* On unmap, complain about pages that still hold live locks. */
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu:
	 */
	__flush_tlb_all();
}
#endif