/* 
 * Copyright 2002 Andi Kleen, SuSE Labs. 
 * Thanks to Ben LaHaise for precious feedback.
 */ 

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

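/*
 * Walk the kernel page tables and return the pte mapping @address,
 * the pmd cast to a pte_t * when the address is covered by a 2MB
 * large page, or NULL when no present mapping exists.
 */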
static inline pte_t *lookup_address(unsigned long address) 
{ 
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL; 
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL; 
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL; 
	return pte;
} 

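/*
 * Allocate a page table page and fill it with 512 4k ptes covering the
 * 2MB region around @address: the pte for @address itself gets @prot,
 * all the others get @ref_prot.  The caller hooks the returned page
 * into the pmd and tracks it via page_private().
 */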
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{ 
	int i; 
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;
	if (!base) 
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK; 
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
				   addr == address ? prot : ref_prot);
	}
	return base;
} 

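/*
 * Flush one 4k page from the CPU caches line by line, using the
 * clflush line size reported by CPUID.
 */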
static void cache_flush_page(void *adr)
{
	int i;
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		asm volatile("clflush (%0)" :: "r" (adr + i));
}

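/*
 * Runs on every CPU via on_each_cpu(): write back and invalidate the
 * caches (or clflush the pages on the list) and then flush the TLB.
 */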
static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/* When clflush is available always use it because it is
	   much cheaper than WBINVD.  Disable clflush for now because
	   the high-level code is not ready yet. */
	if (1 || !cpu_has_clflush)
		asm volatile("wbinvd" ::: "memory");
	else list_for_each_entry(pg, l, lru) {
		void *adr = page_address(pg);
		if (cpu_has_clflush)
			cache_flush_page(adr);
	}
	__flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{	
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

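/*
 * Page table pages made redundant by revert_page() are queued on
 * deferred_pages and freed from global_flush_tlb(), after every CPU
 * has flushed its TLB and caches.
 */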
static inline void save_page(struct page *fpage)
{
	list_add(&fpage->lru, &deferred_pages);
}

/* 
 * No more special protections in this 2/4MB area - revert to a
 * large page again. 
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;
	unsigned long pfn;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd,address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}      

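/*
 * Change the attributes of a single kernel-mapping pte.  A 2MB page is
 * split on first use; page_private() of the page table page counts the
 * ptes with non-standard attributes, and when it drops back to zero
 * the large page is restored and the page table page is queued for
 * freeing.
 */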
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
				   pgprot_t ref_prot)
{ 
	pte_t *kpte; 
	struct page *kpte_page;
	pgprot_t ref_prot2;
	kpte = lookup_address(address);
	if (!kpte) return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;
			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* on x86-64 the direct mapping set at boot is not using 4k pages */
 	BUG_ON(PageReserved(kpte_page));

	if (page_private(kpte_page) == 0) {
		save_page(kpte_page);
		revert_page(address, ref_prot);
 	}
	return 0;
} 

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in kernel linear mapping too.
 * 
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 * 
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	int i; 

	if (address >= __START_KERNEL_map
	    && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
			if (err)
				break;
		}
		/* Handle the kernel mapping too, which aliases part of
		 * lowmem */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;
			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		} 
	} 	
	up_write(&init_mm.mmap_sem); 
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_addr(addr, numpages, prot);
}

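/*
 * Flush the caches and TLBs on all CPUs and free the page table pages
 * that change_page_attr() has made redundant.
 */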
void global_flush_tlb(void)
{ 
	struct page *pg, *next;
	struct list_head l;

	down_read(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_read(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		ClearPagePrivate(pg);
		__free_page(pg);
	} 
} 
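
/*
 * Typical calling sequence, per the rules in the comment above
 * change_page_attr_addr(): change the attributes, then do a single
 * global_flush_tlb().  The sketch below is illustrative only; "buf"
 * stands for some hypothetical kernel direct-mapped buffer and is not
 * defined in this file:
 *
 *	struct page *pg = virt_to_page(buf);
 *
 *	change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *	... use the now-uncached mapping ...
 *	change_page_attr(pg, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */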

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);