Commit 2379a23e authored by Michal Hocko, committed by Linus Torvalds

powerpc: get rid of superfluous __GFP_REPEAT

__GFP_REPEAT has rather weak semantics, and ever since it was introduced
around 2.6.12 it has been ignored for low-order allocations.
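The threshold in question is PAGE_ALLOC_COSTLY_ORDER (order 3). The following
is a minimal userspace sketch of the shape of that retry decision, not kernel
code: the flag bit value is a stand-in, and should_keep_retrying is a
hypothetical helper, not the real mm/page_alloc.c logic.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3         /* real kernel value */
#define __GFP_REPEAT            (1u << 0) /* stand-in bit, not the real one */

/* Mirrors the spirit of the slow-path check: below the costly threshold
 * the allocator keeps retrying anyway, so __GFP_REPEAT changes nothing. */
static bool should_keep_retrying(unsigned int gfp, unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;               /* flag ignored for low orders */
	return gfp & __GFP_REPEAT;         /* only costly orders consult it */
}

int main(void)
{
	printf("order-0, no flag: %d\n", should_keep_retrying(0, 0));
	printf("order-0, REPEAT:  %d\n", should_keep_retrying(__GFP_REPEAT, 0));
	printf("order-4, no flag: %d\n", should_keep_retrying(0, 4));
	printf("order-4, REPEAT:  %d\n", should_keep_retrying(__GFP_REPEAT, 4));
	return 0;
}

For order-0 the result is the same with or without the flag; only the order-4
request behaves differently.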

{pud,pmd}_alloc_one allocate from the {PGT,PUD}_CACHE caches initialized in
pgtable_cache_init, whose objects are never larger than sizeof(void *) << 12
bytes, which fits within the !costly allocation request size.
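Concretely, with 64-bit pointers that bound is 8 << 12 = 32 KiB, which is an
order-3 request with 4 KiB pages and hence not costly. A quick standalone
check of that arithmetic (the page size is an assumption, and order_for is a
hypothetical stand-in for the kernel's get_order()):

#include <stdio.h>

#define PAGE_SIZE               4096u /* assumption: 4 KiB pages */
#define PAGE_ALLOC_COSTLY_ORDER 3     /* real kernel value */

/* Smallest order whose block covers 'size' bytes. */
static unsigned int order_for(unsigned long size)
{
	unsigned int order = 0;
	unsigned long block = PAGE_SIZE;

	while (block < size) {
		block <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	unsigned long max_cache = sizeof(void *) << 12; /* 8 << 12 = 32 KiB */
	unsigned int order = order_for(max_cache);

	printf("max cache object: %lu bytes -> order-%u (%scostly)\n",
	       max_cache, order,
	       order > PAGE_ALLOC_COSTLY_ORDER ? "" : "!");
	return 0;
}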

PGALLOC_GFP is used only in radix__pgd_alloc, which issues either order-0 or
order-4 requests.  The first doesn't need the flag while the second does.
Drop __GFP_REPEAT from PGALLOC_GFP and add it back explicitly for the order-4
request.
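For scale: order-0 is a single page and order-4 is 16 pages, so only the
order-4 request crosses the costly threshold where __GFP_REPEAT has any
effect. A small sketch of that comparison (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SIZE               4096u /* assumption: 4 KiB pages */
#define PAGE_ALLOC_COSTLY_ORDER 3     /* real kernel value */

int main(void)
{
	unsigned int orders[] = { 0, 4 }; /* radix__pgd_alloc's two cases */

	for (int i = 0; i < 2; i++) {
		unsigned int order = orders[i];

		printf("order-%u (%u KiB): __GFP_REPEAT %s\n",
		       order, (PAGE_SIZE << order) / 1024u,
		       order > PAGE_ALLOC_COSTLY_ORDER ?
		       "is honoured -> keep it" : "is ignored -> drop it");
	}
	return 0;
}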

In practice the flag never made a difference for the kmem_cache allocations
removed here, because those requests all fall below the
PAGE_ALLOC_COSTLY_ORDER threshold, where __GFP_REPEAT is ignored.

Link: http://lkml.kernel.org/r/1464599699-30131-12-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent a4135b93
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
 			pgtable_cache[(shift) - 1];	\
 		})
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
 	return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
 	struct page *page;
-	page = alloc_pages(PGALLOC_GFP, 4);
+	page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
 	if (!page)
 		return NULL;
 	return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -115,8 +114,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
...
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
...
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
-	new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+	new = kmem_cache_zalloc(cachep, GFP_KERNEL);
 
 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);