diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 40f17c34b4153fab93b4f1a2685dee0b8cac4da8..35c66a269bccca8b88fc52485568b19b86e06264 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -710,7 +710,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * run pte_offset_map on the pmd, if an huge pmd could
 	 * materialize from under us from a different thread.
 	 */
-	if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+	if (unlikely(pmd_none(*pmd)) &&
+	    unlikely(__pte_alloc(mm, vma, pmd, address)))
 		return VM_FAULT_OOM;
 	/* if an huge pmd materialized from under us just retry later */
 	if (unlikely(pmd_trans_huge(*pmd)))
diff --git a/mm/memory.c b/mm/memory.c
index 221fc9ffcab1da33eb15947776975730b5058b67..7cf762857baa41d1236e51cca96604a47d709cda 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3560,7 +3560,8 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * run pte_offset_map on the pmd, if an huge pmd could
 	 * materialize from under us from a different thread.
 	 */
-	if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
+	if (unlikely(pmd_none(*pmd)) &&
+	    unlikely(__pte_alloc(mm, vma, pmd, address)))
 		return VM_FAULT_OOM;
 	/* if an huge pmd materialized from under us just retry later */
 	if (unlikely(pmd_trans_huge(*pmd)))
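
Both hunks apply the same guard: only call __pte_alloc() when the pmd is still empty (pmd_none()), and mark both the empty check and the allocation failure as the unlikely branches. Below is a minimal userspace C sketch of that check-before-allocate pattern; dir_entry, table_alloc() and handle_fault() are invented for illustration and are not the kernel's API.

/*
 * Illustrative userspace sketch (not kernel code): skip the lower-level
 * table allocation when the directory entry is already populated, the
 * same shape as "if (unlikely(pmd_none(*pmd)) && unlikely(__pte_alloc(...)))".
 */
#include <stdio.h>
#include <stdlib.h>

struct dir_entry {
	void *table;	/* stands in for a pmd entry pointing at a pte table */
};

/* Allocate the lower-level table; can fail, like __pte_alloc(). */
static int table_alloc(struct dir_entry *e)
{
	e->table = calloc(512, sizeof(void *));
	return e->table ? 0 : -1;
}

static int handle_fault(struct dir_entry *e)
{
	/*
	 * Attempt the allocation only while the entry is still empty; if
	 * something was already installed (e.g. a huge entry materialized
	 * from another path), don't do redundant allocation work.
	 */
	if (e->table == NULL && table_alloc(e) != 0)
		return -1;	/* stands in for VM_FAULT_OOM */
	return 0;
}

int main(void)
{
	struct dir_entry e = { .table = NULL };

	if (handle_fault(&e) != 0) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	puts("table present after fault");
	free(e.table);
	return 0;
}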