From 1531cff44b5bb30c899404c044805ec60b2f3620 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Fri, 29 Jun 2018 14:06:29 +0530
Subject: [PATCH] powerpc/mm/hash: Remove the superfluous bitwise operation
 when finding the hpte group

When computing the starting slot number for a hash page table group we
used to do this:

  hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;

Multiplying by 8 (HPTES_PER_GROUP) already implies the last three bits
are 0, so we don't need to clear them separately.

Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/mm/dump_hashpagetable.c |  2 +-
 arch/powerpc/mm/hash64_4k.c          |  8 ++++----
 arch/powerpc/mm/hash64_64k.c         | 15 +++++++--------
 arch/powerpc/mm/hash_utils_64.c      | 10 ++++------
 arch/powerpc/mm/hugepage-hash64.c    |  9 ++++-----
 5 files changed, 20 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
index 14cfb11b09d0..d241cb6518da 100644
--- a/arch/powerpc/mm/dump_hashpagetable.c
+++ b/arch/powerpc/mm/dump_hashpagetable.c
@@ -260,7 +260,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
 	/* to check in the secondary hash table, we invert the hash */
 	if (!primary)
 		hash = ~hash;
-	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	/* see if we can find an entry in the hpte with this hash */
 	for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
 		lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
index d573d7d07f25..6fa6765a10eb 100644
--- a/arch/powerpc/mm/hash64_4k.c
+++ b/arch/powerpc/mm/hash64_4k.c
@@ -80,7 +80,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		hash = hpt_hash(vpn, shift, ssize);
 
 repeat:
-		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+		hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 
 		/* Insert into the hash table, primary slot */
 		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -89,7 +89,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 		 * Primary is full, try the secondary
 		 */
 		if (unlikely(slot == -1)) {
-			hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+			hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
 							rflags,
 							HPTE_V_SECONDARY,
@@ -97,8 +97,8 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 							MMU_PAGE_4K, ssize);
 			if (slot == -1) {
 				if (mftb() & 0x1)
-					hpte_group = ((hash & htab_hash_mask) *
-						      HPTES_PER_GROUP) & ~0x7UL;
+					hpte_group = (hash & htab_hash_mask) *
+						      HPTES_PER_GROUP;
 				mmu_hash_ops.hpte_remove(hpte_group);
 				/*
 				 * FIXME!! Should be try the group from which we removed ?
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index e601d95c3b20..3afa253d7f52 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -154,7 +154,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	}
 	hash = hpt_hash(vpn, shift, ssize);
 repeat:
-	hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 
 	/* Insert into the hash table, primary slot */
 	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -165,7 +165,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	if (unlikely(slot == -1)) {
 		bool soft_invalid;
 
-		hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
 						rflags, HPTE_V_SECONDARY,
 						MMU_PAGE_4K, MMU_PAGE_4K,
@@ -193,8 +193,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 			 * that we do not get the same soft-invalid slot.
 			 */
 			if (soft_invalid || (mftb() & 0x1))
-				hpte_group = ((hash & htab_hash_mask) *
-					      HPTES_PER_GROUP) & ~0x7UL;
+				hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 
 			mmu_hash_ops.hpte_remove(hpte_group);
 			/*
@@ -288,7 +287,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		hash = hpt_hash(vpn, shift, ssize);
 
 repeat:
-		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+		hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 
 		/* Insert into the hash table, primary slot */
 		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -298,7 +297,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 		 * Primary is full, try the secondary
 		 */
 		if (unlikely(slot == -1)) {
-			hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+			hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
 							rflags,
 							HPTE_V_SECONDARY,
@@ -306,8 +305,8 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 							MMU_PAGE_64K, ssize);
 			if (slot == -1) {
 				if (mftb() & 0x1)
-					hpte_group = ((hash & htab_hash_mask) *
-						      HPTES_PER_GROUP) & ~0x7UL;
+					hpte_group = (hash & htab_hash_mask) *
+						      HPTES_PER_GROUP;
 				mmu_hash_ops.hpte_remove(hpte_group);
 				/*
 				 * FIXME!! Should be try the group from which we removed ?
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5a72e980e25a..743c8dbe1941 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1753,8 +1753,7 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 	long slot;
 
 repeat:
-	hpte_group = ((hash & htab_hash_mask) *
-		       HPTES_PER_GROUP) & ~0x7UL;
+	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 
 	/* Insert into the hash table, primary slot */
 	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
@@ -1762,15 +1761,14 @@ long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
 
 	/* Primary is full, try the secondary */
 	if (unlikely(slot == -1)) {
-		hpte_group = ((~hash & htab_hash_mask) *
-			      HPTES_PER_GROUP) & ~0x7UL;
+		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
 						vflags | HPTE_V_SECONDARY,
 						psize, psize, ssize);
 		if (slot == -1) {
 			if (mftb() & 0x1)
-				hpte_group = ((hash & htab_hash_mask) *
-					      HPTES_PER_GROUP)&~0x7UL;
+				hpte_group = (hash & htab_hash_mask) *
+					      HPTES_PER_GROUP;
 
 			mmu_hash_ops.hpte_remove(hpte_group);
 			goto repeat;
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index f20d16f849c5..01f213d2bcb9 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -128,7 +128,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		new_pmd |= H_PAGE_HASHPTE;
 
 repeat:
-		hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+		hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 
 		/* Insert into the hash table, primary slot */
 		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
@@ -137,16 +137,15 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 		 * Primary is full, try the secondary
 		 */
 		if (unlikely(slot == -1)) {
-			hpte_group = ((~hash & htab_hash_mask) *
-				      HPTES_PER_GROUP) & ~0x7UL;
+			hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 
 			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
 							rflags, HPTE_V_SECONDARY,
 							psize, lpsize, ssize);
 			if (slot == -1) {
 				if (mftb() & 0x1)
-					hpte_group = ((hash & htab_hash_mask) *
-						      HPTES_PER_GROUP) & ~0x7UL;
+					hpte_group = (hash & htab_hash_mask) *
+							HPTES_PER_GROUP;
 
 				mmu_hash_ops.hpte_remove(hpte_group);
 				goto repeat;
--
GitLab
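
For illustration only, here is a small standalone C program (not part of the
patch, and not kernel code) that checks the identity the commit relies on:
multiplying by HPTES_PER_GROUP (8, per the commit message) is a left shift by
three, so the low three bits of the product are already zero and the
"& ~0x7UL" mask is a no-op. The htab_hash_mask value below is an arbitrary
example standing in for the kernel's boot-time value.

  #include <assert.h>
  #include <stdio.h>

  #define HPTES_PER_GROUP 8UL

  int main(void)
  {
  	/* Arbitrary example mask; stands in for the kernel's htab_hash_mask. */
  	unsigned long htab_hash_mask = 0xfffffUL;
  	unsigned long hash;

  	for (hash = 0; hash < (1UL << 22); hash++) {
  		unsigned long old = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
  		unsigned long new = (hash & htab_hash_mask) * HPTES_PER_GROUP;

  		/*
  		 * Multiplying by 8 shifts left by 3, so the low three bits
  		 * of the product are already zero; masking them off again
  		 * changes nothing.
  		 */
  		assert(old == new);
  	}
  	printf("old and new hpte_group formulas agree for all tested hashes\n");
  	return 0;
  }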