Commit e44e41d0 authored by Bob Picco, committed by Tony Luck

[IA64] fix show_mem for VIRTUAL_MEM_MAP+FLATMEM

contig.c (FLATMEM) requires the same show_mem optimization that discontig.c
already has when VIRTUAL_MEM_MAP is in use; without it, show_mem probes every
pfn in the map's holes and FLATMEM hits softlockup timeouts.
This was boot tested for the memory configurations SPARSEMEM,
DISCONTIG+VIRTUAL_MEM_MAP, FLATMEM, FLATMEM+VIRTUAL_MEM_MAP, and
FLATMEM+VIRTUAL_MEM_MAP with the largest memory gap smaller than LARGE_GAP
(forced with the boot parameter "mem=").

The patched kernels were boot tested and the "echo m > /proc/sysrq-trigger"
output evaluated for FLATMEM, FLATMEM+VIRTUAL_MEM_MAP,
DISCONTIGMEM+VIRTUAL_MEM_MAP, and SPARSEMEM.
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Parent 921eea1c
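Before the diffs, a standalone sketch of the idea (toy values and names,
nothing below is kernel code): with a virtual mem_map, pfn_valid() must be
probed per pfn, so a huge hole means one probe per pfn inside show_mem,
enough to trip the softlockup watchdog; jumping to the next valid pfn
crosses the hole in one step.

    #include <stdio.h>

    /*
     * Toy model of the show_mem scan. valid_pfn() stands in for
     * pfn_valid(), next_valid_pfn() for vmemmap_find_next_valid_pfn();
     * the hole bounds are made up for illustration.
     */
    #define MAX_PFN  1000000UL
    #define HOLE_LO  1000UL
    #define HOLE_HI  999000UL

    static int valid_pfn(unsigned long pfn)
    {
            return pfn < HOLE_LO || pfn >= HOLE_HI;
    }

    /* Cross the whole hole in one step instead of probing every pfn. */
    static unsigned long next_valid_pfn(unsigned long pfn)
    {
            return (pfn >= HOLE_LO && pfn < HOLE_HI) ? HOLE_HI : pfn + 1;
    }

    int main(void)
    {
            unsigned long i, counted = 0, probes = 0;

            for (i = 0; i < MAX_PFN; i++) {
                    probes++;
                    if (!valid_pfn(i)) {
                            /* as in the patch: -1 compensates the loop's i++ */
                            i = next_valid_pfn(i) - 1;
                            continue;
                    }
                    counted++;
            }
            /* prints: counted 2000 pfns in 2001 probes (vs 1000000 without the skip) */
            printf("counted %lu pfns in %lu probes\n", counted, probes);
            return 0;
    }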
arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 static unsigned long num_dma_physpages;
+static unsigned long max_gap;
 #endif
 
 /**
@@ -45,9 +46,15 @@ show_mem (void)
 	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-	i = max_mapnr;
-	while (i-- > 0) {
-		if (!pfn_valid(i))
+	for (i = 0; i < max_mapnr; i++) {
+		if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+			if (max_gap < LARGE_GAP)
+				continue;
+			i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
 			continue;
+		}
 		total++;
 		if (PageReserved(mem_map+i))
 			reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
 	unsigned long zones_size[MAX_NR_ZONES];
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 	unsigned long zholes_size[MAX_NR_ZONES];
-	unsigned long max_gap;
 #endif
 
 	/* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
 		}
 	}
 
-	max_gap = 0;
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
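Two details of the contig.c hunks deserve a note. The backward
`while (i-- > 0)` scan cannot jump over holes, so it becomes a forward
`for` loop, and max_gap moves from a paging_init() local to a file-static
so show_mem() can consult it (being static, it no longer needs the explicit
`max_gap = 0` either). The `- 1` at the call site exists because the helper
returns the next valid pfn while the loop header still runs i++; a worked
example, assuming a hole spanning pfns 100..199:

    /* i == 100 and pfn_valid(100) fails; the next valid pfn is 200 */
    i = vmemmap_find_next_valid_pfn(0, i) - 1;  /* i = 200 - 1 = 199 */
    continue;                                   /* for-loop does i++ */
    /* next iteration begins at i == 200, the first valid pfn */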
arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	unsigned long end_address, hole_next_pfn;
-	unsigned long stop_address;
-
-	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
-	end_address = PAGE_ALIGN(end_address);
-
-	stop_address = (unsigned long) &vmem_map[
-		pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
-	do {
-		pgd_t *pgd;
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
-
-		pgd = pgd_offset_k(end_address);
-		if (pgd_none(*pgd)) {
-			end_address += PGDIR_SIZE;
-			continue;
-		}
-
-		pud = pud_offset(pgd, end_address);
-		if (pud_none(*pud)) {
-			end_address += PUD_SIZE;
-			continue;
-		}
-
-		pmd = pmd_offset(pud, end_address);
-		if (pmd_none(*pmd)) {
-			end_address += PMD_SIZE;
-			continue;
-		}
-
-		pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
-		if (pte_none(*pte)) {
-			end_address += PAGE_SIZE;
-			pte++;
-			if ((end_address < stop_address) &&
-			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
-				goto retry_pte;
-			continue;
-		}
-		/* Found next valid vmem_map page */
-		break;
-	} while (end_address < stop_address);
-
-	end_address = min(end_address, stop_address);
-	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
-	hole_next_pfn = end_address / sizeof(struct page);
-	return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
-	return i + 1;
-}
-#endif
-
 /**
  * show_mem - give short summary of memory stats
  *
@@ -625,7 +563,8 @@ void show_mem(void)
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
 			else {
-				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+					i) - 1;
 				continue;
 			}
 			if (PageReserved(page))
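The removed find_next_valid_pfn_for_pgdat() is not lost: it moves to init.c
(next diff) as vmemmap_find_next_valid_pfn(), taking a node id instead of a
pgdat pointer so that both show_mem() implementations can share it. The two
call shapes, straight from the hunks:

    /* discontig.c: one scan per node */
    i = vmemmap_find_next_valid_pfn(pgdat->node_id, i) - 1;

    /* contig.c (FLATMEM): node 0 is the only node */
    i = vmemmap_find_next_valid_pfn(0, i) - 1;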
arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+
+	stop_address = (unsigned long) &vmem_map[
+		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
+
 int __init
 create_mem_map_page_table (u64 start, u64 end, void *arg)
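What makes the helper fast is the step taken for each empty page-table
level backing vmem_map: PGDIR_SIZE for a missing pgd entry, PUD_SIZE for a
pud, PMD_SIZE for a pmd, and PAGE_SIZE per pte, with the retry_pte path
walking adjacent ptes directly until a PMD boundary. For a rough sense of
scale (the page size and struct page size below are illustrative,
config-dependent assumptions):

    /*
     * Assume 16KB pages and a 64-byte struct page (illustrative only).
     * One empty entry skips this much of vmem_map, i.e. this many pfns:
     *
     *   empty pte: PAGE_SIZE = 16KB        -> 16384 / 64 =    256 pfns
     *   empty pmd: PMD_SIZE  = 2048 * 16KB -> 32MB  / 64 = 524288 pfns
     *
     * The closing arithmetic maps the stop address back to a pfn,
     * rounding up by sizeof(struct page) - 1 so an address inside a
     * struct page resolves to that page's index:
     *
     *   pfn = (end_address - (unsigned long) vmem_map
     *          + sizeof(struct page) - 1) / sizeof(struct page);
     */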
include/asm-ia64/meminit.h
@@ -56,6 +56,11 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
 extern struct page *vmem_map;
 extern int find_largest_hole (u64 start, u64 end, void *arg);
 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
+extern int vmemmap_find_next_valid_pfn(int, int);
+#else
+static inline int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	return i + 1;
+}
 #endif
 
 #endif /* meminit_h */
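When CONFIG_VIRTUAL_MEM_MAP is off there is no vmem_map and no holes to
skip, so the header supplies an inline stub and the callers need no #ifdef
of their own; together with the `- 1` at each call site the skip cancels
out:

    /*
     * Stub: vmemmap_find_next_valid_pfn(node, i) == i + 1, so
     *
     *   i = vmemmap_find_next_valid_pfn(0, i) - 1;  // i unchanged
     *   continue;                                   // loop's i++ runs
     *
     * and the scan degenerates to the plain one-pfn-per-iteration loop.
     */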