/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

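/*
 * Look up the kernel page-table entry for a virtual address.  Returns
 * NULL if the address is not mapped; otherwise *level is set to 2 when
 * the address is covered by a large (PMD) page, in which case the PMD
 * entry itself is returned, or to 3 for a normal 4k PTE.
 */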
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	*level = 2;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	*level = 3;

	return pte_offset_kernel(pmd, address);
}

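/*
 * Update a kernel PMD entry: change init_mm first, then - unless kernel
 * PMDs are shared - propagate the new value to every pgd on pgd_list.
 */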
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;

	/* change init_mm */
	set_pte_atomic(kpte, pte);
	if (SHARED_KERNEL_PMD)
		return;

	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
}

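/*
 * Split a kernel large-page mapping into a full page of 4k PTEs that
 * inherit the large page's protections, then install the new page table
 * under pgd_lock, unless another CPU has already split it for us.
 */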
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

	base = alloc_pages(GFP_KERNEL, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable:
	 */
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

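/*
 * Apply the new protection to a single page of the kernel linear
 * mapping, splitting the covering large page first when necessary.
 */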
static int __change_page_attr(struct page *page, pgprot_t prot)
{
	struct page *kpte_page;
	unsigned long address;
	int level, err = 0;
	pte_t *kpte;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	/*
	 * Better fail early if someone sets the kernel text to NX.
	 * Does not cover __inittext
	 */
	BUG_ON(address >= (unsigned long)&_text &&
	       address < (unsigned long)&_etext &&
	       (pgprot_val(prot) & _PAGE_NX));

	if (level == 3) {
		set_pte_atomic(kpte, mk_pte(page, canon_pgprot(prot)));
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0, i;

	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}

	return err;
}
EXPORT_SYMBOL(change_page_attr);
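/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * driver that temporarily needs a page uncached in the kernel linear
 * map could do something like the following; note that the TLB flush
 * is the caller's job:
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *
 *	if (page && !change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
 *		global_flush_tlb();
 *	...
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 *	__free_page(page);
 */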

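/*
 * change_page_attr() variant that takes a kernel virtual address rather
 * than a struct page; stops early at the first pfn that is not valid.
 */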
int change_page_attr_addr(unsigned long addr, int numpages, pgprot_t prot)
{
	int i;
	unsigned long pfn = (__pa(addr) >> PAGE_SHIFT);

	for (i = 0; i < numpages; i++) {
		if (!pfn_valid(pfn + i)) {
			WARN_ON_ONCE(1);
			break;
		} else {
			int level;
			pte_t *pte = lookup_address(addr + i*PAGE_SIZE, &level);
			BUG_ON(pte && pte_none(*pte));
		}
	}

	return change_page_attr(virt_to_page(addr), i, prot);
}

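/*
 * Per-CPU flush callback, run on every CPU from global_flush_tlb().
 */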
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

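/*
 * Flush the TLB (and, where needed, the caches) on all CPUs; callers of
 * change_page_attr() must invoke this with interrupts enabled.
 */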
void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
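/*
 * For DEBUG_PAGEALLOC: map lowmem pages into (enable) or out of
 * (!enable) the kernel linear mapping so that stray accesses to freed
 * pages fault immediately.
 */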
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * The return value is ignored - the calls cannot fail, as large
	 * pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock, so flush only the current CPU's TLB.
	 */
	__flush_tlb_all();
}
#endif