/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

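/*
 * cpa_lock serializes page attribute changes and protects df_list,
 * the list of no-longer-needed split page-table pages that
 * global_flush_tlb() will free after the next flush.
 */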
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);


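/*
 * Walk the kernel page tables and return the pte mapping @address,
 * or NULL if no mapping exists.  For a large (2/4MB) mapping the
 * pmd entry itself is returned, cast to pte_t *.
 */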
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}

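/*
 * Build a page of 4K ptes to replace one large-page mapping: the pte
 * covering @address gets @prot, all the others get @ref_prot.
 * Called with cpa_lock held; the lock is dropped around the
 * allocation since alloc_pages(GFP_KERNEL, ...) may sleep.
 */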
static struct page *split_large_page(unsigned long address, pgprot_t prot,
					pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base;
	pte_t *pbase;

	spin_unlock_irq(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	spin_lock_irq(&cpa_lock);
	if (!base)
		return NULL;

	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
					   addr == address ? prot : ref_prot));
	}
	return base;
}

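/*
 * Flush the caches for one page (via clflush when available) or the
 * whole cache (wbinvd) otherwise, then flush the TLB.  Runs on every
 * CPU via flush_map()/on_each_cpu().
 */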
static void flush_kernel_map(void *arg)
{
	unsigned long adr = (unsigned long)arg;

	if (adr && cpu_has_clflush) {
		int i;
		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (adr + i));
	} else if (boot_cpu_data.x86_model >= 4)
		wbinvd();

	/* Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();
}

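/*
 * Set a pmd/pte entry in init_mm.  On non-PAE, where every pgd
 * carries its own copy of the kernel mappings, the change is also
 * propagated to every pgd on pgd_list; on PAE the kernel pmds are
 * shared, so updating init_mm is enough.
 */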
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long flags;

	set_pte_atomic(kpte, pte); 	/* change init_mm */
	if (PTRS_PER_PMD > 1)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pgprot_t ref_prot;
	pte_t *linear;

	ref_prot =
	((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

	linear = (pte_t *)
		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
	set_pmd_pte(linear, address,
		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
			    ref_prot));
}

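/*
 * Change the attributes of a single kernel page.  If the page sits
 * in a large-page mapping and needs non-default protections, the
 * large page is split first; when the last non-default pte in a
 * split page-table page reverts to PAGE_KERNEL, the 4K ptes are
 * merged back into a large page.  Called with cpa_lock held.
 */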
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot =
			((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
}

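/* Run flush_kernel_map() on every CPU and wait for completion. */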
static inline void flush_map(void *adr)
{
	on_each_cpu(flush_kernel_map, adr, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the attributes of the page
 * in the kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
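 *
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * "buf" is assumed to be a lowmem kernel virtual address), e.g. to get
 * an uncached view of a buffer:
 *
 *	change_page_attr(virt_to_page(buf), 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();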
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&cpa_lock, flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&cpa_lock, flags);
	return err;
}

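/*
 * Flush caches and TLBs on every CPU after change_page_attr() and
 * free the page-table pages that were merged back into large pages.
 * Must not be called with interrupts disabled, since it sends IPIs.
 */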
void global_flush_tlb(void)
{
	struct list_head l;
	struct page *pg, *next;

	BUG_ON(irqs_disabled());

	spin_lock_irq(&cpa_lock);
	list_replace_init(&df_list, &l);
	spin_unlock_irq(&cpa_lock);
	if (!cpu_has_clflush)
		flush_map(NULL);
	list_for_each_entry_safe(pg, next, &l, lru) {
		if (cpu_has_clflush)
			flush_map(page_address(pg));
		__free_page(pg);
	}
}

#ifdef CONFIG_DEBUG_PAGEALLOC
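/*
 * Map (enable) or unmap (disable) lowmem pages in the kernel linear
 * mapping so that stray accesses to freed pages fault immediately.
 * Only 4K mappings exist here: large pages are disabled at boot when
 * DEBUG_PAGEALLOC is configured.
 */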
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/* The return value is ignored - the calls cannot fail because
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/* We should perform an IPI and flush all TLBs, but that can
	 * deadlock here, so only the current CPU's TLB is flushed.
	 */
	__flush_tlb_all();
}
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);