Commit 3e791d0f authored by Christophe Leroy, committed by Michael Ellerman

powerpc/64s: Remove unneeded #ifdef CONFIG_DEBUG_PAGEALLOC in hash_utils

debug_pagealloc_enabled() is always defined and constant folds to
'false' when CONFIG_DEBUG_PAGEALLOC is not enabled.

Remove the #ifdefs; the code and associated static variables will
be optimised out by the compiler when CONFIG_DEBUG_PAGEALLOC is
not defined.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926075726.2846-2-nicholas@linux.ibm.com
Parent 5e8b2c4d
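For context, below is a minimal, self-contained sketch of the compile-time behaviour the commit message relies on. It is not the kernel's real header: debug_pagealloc_enabled() is reduced to a plain #ifdef (the kernel version also consults a runtime switch), and linear_map_hash_count is reused here purely as an illustrative stand-in for the static data the optimiser can discard.

/*
 * Sketch only: when the config switch is off, the helper constant-folds
 * to false, so the guarded block and the static data it references are
 * eliminated as dead code by the compiler.
 */
#include <stdbool.h>
#include <stdio.h>

/* Uncomment to emulate CONFIG_DEBUG_PAGEALLOC=y. */
/* #define CONFIG_DEBUG_PAGEALLOC 1 */

static inline bool debug_pagealloc_enabled(void)
{
#ifdef CONFIG_DEBUG_PAGEALLOC
	return true;	/* the kernel additionally checks a runtime toggle */
#else
	return false;	/* compile-time constant: guarded code becomes dead code */
#endif
}

/* Referenced only under debug_pagealloc_enabled(); with the option off,
 * the optimiser drops the variable along with the dead branch. */
static unsigned long linear_map_hash_count;

int main(void)
{
	if (debug_pagealloc_enabled()) {
		linear_map_hash_count = 128;
		printf("tracking %lu linear-map slots\n", linear_map_hash_count);
	} else {
		printf("CONFIG_DEBUG_PAGEALLOC off: no bookkeeping emitted\n");
	}
	return 0;
}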
@@ -123,11 +123,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
 #ifdef CONFIG_PPC_64K_PAGES
 int mmu_ci_restrictions;
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
-static DEFINE_SPINLOCK(linear_map_hash_lock);
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 struct mmu_hash_ops mmu_hash_ops;
 EXPORT_SYMBOL(mmu_hash_ops);
@@ -427,11 +424,9 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			break;

 		cond_resched();
-#ifdef CONFIG_DEBUG_PAGEALLOC
 		if (debug_pagealloc_enabled() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
 	return ret < 0 ? ret : 0;
 }
@@ -1066,7 +1061,6 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);

-#ifdef CONFIG_DEBUG_PAGEALLOC
 	if (debug_pagealloc_enabled()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
@@ -1076,7 +1070,6 @@ static void __init htab_initialize(void)
 			panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
 			      __func__, linear_map_hash_count, &ppc64_rma_size);
 	}
-#endif /* CONFIG_DEBUG_PAGEALLOC */

 	/* create bolted the linear mapping in the hash table */
 	for_each_mem_range(i, &base, &end) {
@@ -1988,6 +1981,8 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 }

 #ifdef CONFIG_DEBUG_PAGEALLOC
+static DEFINE_SPINLOCK(linear_map_hash_lock);
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
 	unsigned long hash;
...