/* 
 * Copyright 2002 Andi Kleen, SuSE Labs. 
 * Thanks to Ben LaHaise for precious feedback.
 */ 

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

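/*
 * Walk the kernel page tables and return the pte that maps 'address',
 * or the pmd cast to a pte when the address is covered by a 2MB large
 * page. Returns NULL if there is no present mapping.
 */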
static inline pte_t *lookup_address(unsigned long address) 
{ 
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL; 
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL; 
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL; 
	return pte;
} 

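/*
 * Break a 2MB kernel mapping up into 4k ptes: allocate one page-table
 * page and populate it so that every 4k slot gets ref_prot, except the
 * slot covering 'address', which gets the caller's prot.
 */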
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{ 
	int i; 
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;
	if (!base) 
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK; 
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT, 
				   addr == address ? prot : ref_prot);
	}
	return base;
} 

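/* Flush one 4k page from the CPU caches, one cache line at a time. */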
static void cache_flush_page(void *adr)
{
	int i;
	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		asm volatile("clflush (%0)" :: "r" (adr + i));
}

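/*
 * Runs on every CPU (see flush_map below): flush the caches covering
 * the deferred pages and then their TLB entries.
 */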
static void flush_kernel_map(void *arg)
{
	struct list_head *l = (struct list_head *)arg;
	struct page *pg;

	/* When clflush is available, always use it, because it is
	   much cheaper than WBINVD. */
	if (!cpu_has_clflush)
		asm volatile("wbinvd" ::: "memory");
	list_for_each_entry(pg, l, lru) {
		void *adr = page_address(pg);
		if (cpu_has_clflush)
			cache_flush_page(adr);
		__flush_tlb_one(adr);
	}
}

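/* Run flush_kernel_map on every CPU and wait for all of them to finish. */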
static inline void flush_map(struct list_head *l)
{	
	on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

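/* Defer freeing of a page-table page until the next global_flush_tlb(). */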
static inline void save_page(struct page *fpage)
{
	list_add(&fpage->lru, &deferred_pages);
}

/* 
 * No more special protections in this 2/4MB area - revert to a
 * large page again. 
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;
	unsigned long pfn;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd,address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
	large_pte = pfn_pte(pfn, ref_prot);
	large_pte = pte_mkhuge(large_pte);
	set_pte((pte_t *)pmd, large_pte);
}      

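/*
 * Apply 'prot' to the single page at 'address'. page_private() of the
 * pte page counts how many of its entries differ from ref_prot; when
 * the count drops back to zero the pte page is queued for freeing and
 * the original 2MB mapping is restored.
 */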
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
				   pgprot_t ref_prot)
{ 
	pte_t *kpte; 
	struct page *kpte_page;
	pgprot_t ref_prot2;
	kpte = lookup_address(address);
	if (!kpte) return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) { 
		if (!pte_huge(*kpte)) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
 			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
 			 */
			struct page *split;
			ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if (!pte_huge(*kpte)) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* On x86-64 the direct mapping set up at boot does not use 4k pages. */
 	BUG_ON(PageReserved(kpte_page));

	if (page_private(kpte_page) == 0) {
		save_page(kpte_page);
		revert_page(address, ref_prot);
 	}
	return 0;
} 

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere: some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes in the
 * kernel linear mapping too.
 * 
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 * 
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0; 
	int i; 

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
		if (err) 
			break; 
		/* Handle the kernel mapping too, which aliases part of
		 * lowmem */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;
			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		} 
	} 	
	up_write(&init_mm.mmap_sem); 
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_addr(addr, numpages, prot);
}
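
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * that needs a few pages uncached pairs change_page_attr() with the
 * mandatory global_flush_tlb(). 'pg' and 'n' are assumed to be supplied
 * by the caller.
 *
 *	change_page_attr(pg, n, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *	... use the uncached mapping ...
 *	change_page_attr(pg, n, PAGE_KERNEL);
 *	global_flush_tlb();
 */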

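/*
 * Flush caches and TLBs on all CPUs, then free the page-table pages
 * that became unused since the last flush.
 */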
void global_flush_tlb(void)
{ 
	struct page *pg, *next;
	struct list_head l;

	down_read(&init_mm.mmap_sem);
	list_replace_init(&deferred_pages, &l);
	up_read(&init_mm.mmap_sem);

	flush_map(&l);

	list_for_each_entry_safe(pg, next, &l, lru) {
		ClearPagePrivate(pg);
		__free_page(pg);
	} 
} 

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);