Commit 76ebd054 authored by Thomas Gleixner

x86: introduce page pool in cpa

DEBUG_PAGEALLOC was not possible on 64-bit due to its hardcoded
early-boot reliance on PSE pages, and the fragility of the runtime
split-up of large pages. The split-up ended in recursive calls to
alloc_pages() when a page for a pte split was requested.

Avoid the recursion with a preallocated page pool, which is used to
split up large mappings and gets refilled in the return path of
kernel_map_pages after the split has been done. The size of the page
pool is adjusted to the available memory.

This part implements only the page pool and its initialization,
without using it yet.
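
The idea in miniature, as a user-space sketch (illustrative only, not
part of the patch: fill_pool(), take_page_for_split() and POOL_TARGET
are made-up names, the pool_refill bit lock is modeled with a C11
atomic_flag, and the kernel's list_head/pgd_lock machinery is reduced
to a plain array):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_TARGET 8

static void *pool[POOL_TARGET];
static int pool_pages;
static atomic_flag pool_refill = ATOMIC_FLAG_INIT;

/* Refill outside the critical path. The atomic_flag plays the role of
 * test_and_set_bit_lock(): a refill that re-enters itself (because the
 * allocator needs a pte split, which needs a pool page) bails out
 * instead of recursing. */
static void fill_pool(void)
{
        if (atomic_flag_test_and_set(&pool_refill))
                return;
        while (pool_pages < POOL_TARGET) {
                void *p = malloc(4096); /* stands in for alloc_pages() */
                if (!p)
                        break;
                pool[pool_pages++] = p;
        }
        atomic_flag_clear(&pool_refill);
}

/* The split path consumes a preallocated page instead of calling the
 * allocator, so it can never recurse into alloc_pages(). */
static void *take_page_for_split(void)
{
        return pool_pages ? pool[--pool_pages] : NULL;
}

int main(void)
{
        fill_pool();                            /* boot-time preallocation */
        void *pte_page = take_page_for_split(); /* a large-page split */
        printf("split used %p, %d pages left\n", pte_page, pool_pages);
        fill_pool();                            /* refill on the return path */
        return 0;
}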
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent a03c2a48
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -664,6 +664,8 @@ void __init mem_init(void)
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
 
+	cpa_init();
+
 	/*
 	 * Subtle. SMP is doing it's boot stuff late (because it has to
 	 * fork idle threads) - but it also needs low mappings for the
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -528,6 +528,8 @@ void __init mem_init(void)
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
+
+	cpa_init();
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -8,6 +8,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/interrupt.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -336,6 +337,77 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	return do_split;
 }
 
+static LIST_HEAD(page_pool);
+static unsigned long pool_size, pool_pages, pool_low;
+static unsigned long pool_used, pool_failed, pool_refill;
+
+static void cpa_fill_pool(void)
+{
+	struct page *p;
+	gfp_t gfp = GFP_KERNEL;
+
+	/* Do not allocate from interrupt context */
+	if (in_irq() || irqs_disabled())
+		return;
+	/*
+	 * Check unlocked. It does not matter if we have one more
+	 * page in the pool. The bit lock avoids recursive pool
+	 * allocations:
+	 */
+	if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
+		return;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	/*
+	 * We could do:
+	 * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
+	 * but this fails on !PREEMPT kernels
+	 */
+	gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
#endif
+
+	while (pool_pages < pool_size) {
+		p = alloc_pages(gfp, 0);
+		if (!p) {
+			pool_failed++;
+			break;
+		}
+		spin_lock_irq(&pgd_lock);
+		list_add(&p->lru, &page_pool);
+		pool_pages++;
+		spin_unlock_irq(&pgd_lock);
+	}
+	clear_bit_unlock(0, &pool_refill);
+}
+
+#define SHIFT_MB		(20 - PAGE_SHIFT)
+#define ROUND_MB_GB		((1 << 10) - 1)
+#define SHIFT_MB_GB		10
+#define POOL_PAGES_PER_GB	16
+
+void __init cpa_init(void)
+{
+	struct sysinfo si;
+	unsigned long gb;
+
+	si_meminfo(&si);
+	/*
+	 * Calculate the number of pool pages:
+	 *
+	 * Convert totalram (nr of pages) to MiB and round to the next
+	 * GiB. Shift MiB to GiB and multiply the result by
+	 * POOL_PAGES_PER_GB:
+	 */
+	gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
+	pool_size = POOL_PAGES_PER_GB * gb;
+	pool_low = pool_size;
+
+	cpa_fill_pool();
+	printk(KERN_DEBUG
+	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
+	       pool_pages, pool_size);
+}
+
 static int split_large_page(pte_t *kpte, unsigned long address)
 {
 	unsigned long flags, pfn, pfninc = 1;
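
A quick sanity check of the sizing rule above, as a stand-alone program
(illustrative only, not part of the patch; the macros mirror the hunk,
while PAGE_SHIFT == 12, i.e. 4 KiB pages, and the 4 GiB machine size are
assumptions):

#include <stdio.h>

#define PAGE_SHIFT        12
#define SHIFT_MB          (20 - PAGE_SHIFT)    /* pages -> MiB */
#define ROUND_MB_GB       ((1 << 10) - 1)      /* round MiB up to the next GiB */
#define SHIFT_MB_GB       10                   /* MiB -> GiB */
#define POOL_PAGES_PER_GB 16

int main(void)
{
        /* 4 GiB of RAM = 2^20 pages of 4 KiB */
        unsigned long totalram = 1UL << 20;
        unsigned long gb = ((totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;

        /* Prints "gb=4 pool_size=64" */
        printf("gb=%lu pool_size=%lu\n", gb, POOL_PAGES_PER_GB * gb);
        return 0;
}

So the pool stays small: 16 pages (64 KiB) per GiB of RAM, enough to
feed pte splits between refills.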
@@ -600,7 +672,7 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	 * Check whether we really changed something:
 	 */
 	if (!cpa.flushtlb)
-		return ret;
+		goto out;
 
 	/*
 	 * No need to flush, when we did not set any of the caching
@@ -619,6 +691,8 @@ static int change_page_attr_set_clr(unsigned long addr, int numpages,
 	else
 		cpa_flush_all(cache);
 
+out:
+	cpa_fill_pool();
 	return ret;
 }
@@ -772,6 +846,12 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	 * but that can deadlock->flush only current cpu:
 	 */
 	__flush_tlb_all();
+
+	/*
+	 * Try to refill the page pool here. We can do this only after
+	 * the tlb flush.
+	 */
+	cpa_fill_pool();
 }
 #endif
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -44,6 +44,8 @@ int set_memory_np(unsigned long addr, int numpages);
 
 void clflush_cache_range(void *addr, unsigned int size);
 
+void cpa_init(void);
+
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
 #endif