Commit 488fd995 authored by Arjan van de Ven, committed by Ingo Molnar

x86: fix pageattr-selftest

In Ingo's testing, he found a bug in the CPA selftest code. What would
happen is that the test would call change_page_attr_addr on a range of
memory, part of which was read only, part of which was writable. The
only thing the test wanted to change was the global bit...

What actually happened was that the selftest took the permissions of the first
page, and the change_page_attr_addr call then applied those first-page
permissions to the entire range. In the
rodata section case, this resulted in pages after the .rodata becoming
read only... which made the kernel rather unhappy in many interesting
ways.
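
To make the failure mode concrete, here is a small user-space sketch of that
pattern; the page array, the PROT_* bits and the set_range_prot() helper are
made up for illustration and are not the kernel's CPA code:

#include <stdio.h>

#define NPAGES   8
#define PROT_RW  0x1   /* hypothetical "writable" bit */
#define PROT_RO  0x0

/* Toy model of the old behaviour: read the protections of the *first*
 * page, then write that single value back over the whole range,
 * clobbering the per-page differences. */
static void set_range_prot(unsigned int *prot, int npages, unsigned int newprot)
{
        for (int i = 0; i < npages; i++)
                prot[i] = newprot;
}

int main(void)
{
        /* A range that is part read-only (like .rodata) and part writable. */
        unsigned int prot[NPAGES] = {
                PROT_RO, PROT_RO, PROT_RO, PROT_RW,
                PROT_RW, PROT_RW, PROT_RW, PROT_RW
        };

        /* The caller only wanted to touch one attribute, but the new
         * protections were derived from page 0 alone... */
        set_range_prot(prot, NPAGES, prot[0]);

        for (int i = 0; i < NPAGES; i++)
                printf("page %d: %s\n", i, prot[i] & PROT_RW ? "RW" : "RO");
        /* Every page now reports RO, including the ones that must stay RW. */
        return 0;
}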

This is just another example of how dangerous the cpa API is (was); this
patch changes the test to use the incremental clear/set APIs
instead, and it changes the clear/set implementation to work one page at a
time.
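
For contrast, a minimal sketch of the per-page read/modify/write pattern that
an incremental clear-style API follows; again the clear_prot_bit() helper and
the PROT_* bits are invented for illustration, not the real
change_page_attr_clear():

#include <stdio.h>

#define NPAGES       8
#define PROT_RW      0x1   /* hypothetical "writable" bit */
#define PROT_GLOBAL  0x2   /* hypothetical "global" bit */

/* Toy model of the fixed behaviour: visit one page at a time, look up
 * that page's current protections and clear only the requested bit,
 * leaving everything else (RW vs RO) untouched. */
static void clear_prot_bit(unsigned int *prot, int npages, unsigned int mask)
{
        for (int i = 0; i < npages; i++)
                prot[i] &= ~mask;       /* per-page read-modify-write */
}

int main(void)
{
        unsigned int prot[NPAGES] = {
                PROT_GLOBAL,           PROT_GLOBAL,
                PROT_GLOBAL,           PROT_GLOBAL | PROT_RW,
                PROT_GLOBAL | PROT_RW, PROT_GLOBAL | PROT_RW,
                PROT_GLOBAL | PROT_RW, PROT_GLOBAL | PROT_RW,
        };

        clear_prot_bit(prot, NPAGES, PROT_GLOBAL);

        for (int i = 0; i < NPAGES; i++)
                printf("page %d: %s, global=%u\n", i,
                       prot[i] & PROT_RW ? "RW" : "RO",
                       (prot[i] & PROT_GLOBAL) ? 1u : 0u);
        /* The global bit is gone everywhere; RW/RO is preserved per page. */
        return 0;
}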
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent 5398f985
@@ -162,8 +162,8 @@ static __init int exercise_pageattr(void)
                         continue;
                 }
 
-                err = change_page_attr_addr(addr[i], len[i],
-                            pte_pgprot(pte_clrhuge(pte_clrglobal(pte0))));
+                err = change_page_attr_clear(addr[i], len[i],
+                                                __pgprot(_PAGE_GLOBAL));
                 if (err < 0) {
                         printk(KERN_ERR "CPA %d failed %d\n", i, err);
                         failed++;
@@ -197,8 +197,8 @@ static __init int exercise_pageattr(void)
                         failed++;
                         continue;
                 }
-                err = change_page_attr_addr(addr[i], len[i],
-                            pte_pgprot(pte_mkglobal(*pte)));
+                err = change_page_attr_set(addr[i], len[i],
+                                                __pgprot(_PAGE_GLOBAL));
                 if (err < 0) {
                         printk(KERN_ERR "CPA reverting failed: %d\n", err);
                         failed++;
@@ -228,7 +228,6 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
 /**
  * change_page_attr_addr - Change page table attributes in linear mapping
  * @address: Virtual address in linear mapping.
- * @numpages: Number of pages to change
  * @prot: New page table attribute (PAGE_*)
  *
  * Change page attributes of a page in the direct mapping. This is a variant
@@ -240,10 +239,10 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
  *
  * Modules and drivers should use the set_memory_* APIs instead.
  */
-static int change_page_attr_addr(unsigned long address, int numpages,
-                                 pgprot_t prot)
+static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 {
-        int err = 0, kernel_map = 0, i;
+        int err = 0, kernel_map = 0;
+        unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
         if (address >= __START_KERNEL_map &&
@@ -254,30 +253,27 @@ static int change_page_attr_addr(unsigned long address, int numpages,
         }
 #endif
 
-        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-                unsigned long pfn = __pa(address) >> PAGE_SHIFT;
-
-                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-                        err = __change_page_attr(address, pfn, prot);
-                        if (err)
-                                break;
-                }
+        if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+                err = __change_page_attr(address, pfn, prot);
+                if (err)
+                        return err;
+        }
+
 #ifdef CONFIG_X86_64
-                /*
-                 * Handle kernel mapping too which aliases part of
-                 * lowmem:
-                 */
-                if (__pa(address) < KERNEL_TEXT_SIZE) {
-                        unsigned long addr2;
-                        pgprot_t prot2;
-
-                        addr2 = __START_KERNEL_map + __pa(address);
-                        /* Make sure the kernel mappings stay executable */
-                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-                        err = __change_page_attr(addr2, pfn, prot2);
-                }
-#endif
+        /*
+         * Handle kernel mapping too which aliases part of
+         * lowmem:
+         */
+        if (__pa(address) < KERNEL_TEXT_SIZE) {
+                unsigned long addr2;
+                pgprot_t prot2;
+
+                addr2 = __START_KERNEL_map + __pa(address);
+                /* Make sure the kernel mappings stay executable */
+                prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+                err = __change_page_attr(addr2, pfn, prot2);
         }
+#endif
 
         return err;
 }
@@ -307,16 +303,24 @@ static int change_page_attr_set(unsigned long addr, int numpages,
         pgprot_t current_prot;
         int level;
         pte_t *pte;
+        int i, ret;
 
-        pte = lookup_address(addr, &level);
-        if (pte)
-                current_prot = pte_pgprot(*pte);
-        else
-                pgprot_val(current_prot) = 0;
+        for (i = 0; i < numpages ; i++) {
+                pte = lookup_address(addr, &level);
+                if (pte)
+                        current_prot = pte_pgprot(*pte);
+                else
+                        pgprot_val(current_prot) = 0;
 
-        pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+                pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
 
-        return change_page_attr_addr(addr, numpages, prot);
+                ret = change_page_attr_addr(addr, prot);
+                if (ret)
+                        return ret;
+                addr += PAGE_SIZE;
+        }
+
+        return 0;
 }
 
 /**
@@ -344,16 +348,24 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
         pgprot_t current_prot;
         int level;
         pte_t *pte;
+        int i, ret;
 
-        pte = lookup_address(addr, &level);
-        if (pte)
-                current_prot = pte_pgprot(*pte);
-        else
-                pgprot_val(current_prot) = 0;
+        for (i = 0; i < numpages; i++) {
+                pte = lookup_address(addr, &level);
+                if (pte)
+                        current_prot = pte_pgprot(*pte);
+                else
+                        pgprot_val(current_prot) = 0;
 
-        pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);
+                pgprot_val(prot) =
+                                pgprot_val(current_prot) & ~pgprot_val(prot);
 
-        return change_page_attr_addr(addr, numpages, prot);
+                ret = change_page_attr_addr(addr, prot);
+                if (ret)
+                        return ret;
+                addr += PAGE_SIZE;
+        }
+        return 0;
 }
 
 int set_memory_uc(unsigned long addr, int numpages)