Commit 54c14b30 authored by Dan Williams, committed by Shile Zhang

mm: move buddy list manipulations into helpers

to #26589565

commit b03641af680959df57c275a80ff0dc116627c7ae upstream

In preparation for runtime randomization of the zone lists, take all
(well, most of) the list_*() functions in the buddy allocator and put
them in helper functions.  Provide a common control point for injecting
additional behavior when freeing pages.
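
Below is a minimal, stand-alone sketch (not part of this patch) of the pattern being
introduced: every free-list manipulation and the matching nr_free accounting go
through a small set of helpers, giving a single place to hook extra behavior later.
The list_head implementation, the MIGRATE_TYPES value and the struct page /
struct free_area layouts are simplified stand-ins chosen only so the example builds
and runs on its own; they are not the kernel's definitions.

	#include <stdio.h>

	#define MIGRATE_TYPES 3

	/* Simplified stand-in for the kernel's doubly-linked list. */
	struct list_head { struct list_head *prev, *next; };

	static void list_init(struct list_head *head)
	{
		head->prev = head->next = head;
	}

	static void list_add(struct list_head *new, struct list_head *head)
	{
		new->next = head->next;
		new->prev = head;
		head->next->prev = new;
		head->next = new;
	}

	static void list_del(struct list_head *entry)
	{
		entry->prev->next = entry->next;
		entry->next->prev = entry->prev;
		entry->prev = entry->next = entry;
	}

	struct page { struct list_head lru; };

	struct free_area {
		struct list_head free_list[MIGRATE_TYPES];
		unsigned long nr_free;
	};

	/* Common control point: list insertion and accounting in one helper. */
	static void add_to_free_area(struct page *page, struct free_area *area,
				     int migratetype)
	{
		list_add(&page->lru, &area->free_list[migratetype]);
		area->nr_free++;
	}

	/* Removal side: unlink and keep nr_free in sync. */
	static void del_page_from_free_area(struct page *page, struct free_area *area)
	{
		list_del(&page->lru);
		area->nr_free--;
	}

	int main(void)
	{
		struct free_area area = { .nr_free = 0 };
		struct page page;
		int mt;

		for (mt = 0; mt < MIGRATE_TYPES; mt++)
			list_init(&area.free_list[mt]);

		add_to_free_area(&page, &area, 0);
		printf("nr_free after add: %lu\n", area.nr_free);	/* prints 1 */

		del_page_from_free_area(&page, &area);
		printf("nr_free after del: %lu\n", area.nr_free);	/* prints 0 */
		return 0;
	}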

[dan.j.williams@intel.com: fix buddy list helpers]
  Link: http://lkml.kernel.org/r/155033679702.1773410.13041474192173212653.stgit@dwillia2-desk3.amr.corp.intel.com
[vbabka@suse.cz: remove del_page_from_free_area() migratetype parameter]
  Link: http://lkml.kernel.org/r/4672701b-6775-6efd-0797-b6242591419e@suse.cz
Link: http://lkml.kernel.org/r/154899812264.3165233.5219320056406926223.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Robert Elliott <elliott@hpe.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
Reviewed-by: Xunlei Pang <xlpang@linux.alibaba.com>
Signed-off-by: Shile Zhang <shile.zhang@linux.alibaba.com>
Parent 00625113
@@ -512,9 +512,6 @@ static inline void vma_set_anonymous(struct vm_area_struct *vma)
 struct mmu_gather;
 struct inode;
 
-#define page_private(page)		((page)->private)
-#define set_page_private(page, v)	((page)->private = (v))
-
 #if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static inline int pmd_devmap(pmd_t pmd)
 {
@@ -212,6 +212,9 @@ struct page {
 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
+#define page_private(page)		((page)->private)
+#define set_page_private(page, v)	((page)->private = (v))
+
 struct page_frag_cache {
 	void * va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
@@ -18,6 +18,8 @@
 #include <linux/pageblock-flags.h>
 #include <linux/page-flags-layout.h>
 #include <linux/atomic.h>
+#include <linux/mm_types.h>
+#include <linux/page-flags.h>
 #include <asm/page.h>
 #include <linux/ali_hotfix.h>
 
@@ -100,6 +102,50 @@ struct free_area {
 	unsigned long		nr_free;
 };
 
+/* Used for pages not on another list */
+static inline void add_to_free_area(struct page *page, struct free_area *area,
+			     int migratetype)
+{
+	list_add(&page->lru, &area->free_list[migratetype]);
+	area->nr_free++;
+}
+
+/* Used for pages not on another list */
+static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
+				  int migratetype)
+{
+	list_add_tail(&page->lru, &area->free_list[migratetype]);
+	area->nr_free++;
+}
+
+/* Used for pages which are on another list */
+static inline void move_to_free_area(struct page *page, struct free_area *area,
+			     int migratetype)
+{
+	list_move(&page->lru, &area->free_list[migratetype]);
+}
+
+static inline struct page *get_page_from_free_area(struct free_area *area,
+					    int migratetype)
+{
+	return list_first_entry_or_null(&area->free_list[migratetype],
+					struct page, lru);
+}
+
+static inline void del_page_from_free_area(struct page *page,
+		struct free_area *area)
+{
+	list_del(&page->lru);
+	__ClearPageBuddy(page);
+	set_page_private(page, 0);
+	area->nr_free--;
+}
+
+static inline bool free_area_empty(struct free_area *area, int migratetype)
+{
+	return list_empty(&area->free_list[migratetype]);
+}
+
 struct pglist_data;
 
 /*
@@ -1359,13 +1359,13 @@ static enum compact_result __compact_finished(struct zone *zone,
 		bool can_steal;
 
 		/* Job done if page is free of the right migratetype */
-		if (!list_empty(&area->free_list[migratetype]))
+		if (!free_area_empty(area, migratetype))
 			return COMPACT_SUCCESS;
 
 #ifdef CONFIG_CMA
 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
 		if (migratetype == MIGRATE_MOVABLE &&
-			!list_empty(&area->free_list[MIGRATE_CMA]))
+			!free_area_empty(area, MIGRATE_CMA))
 			return COMPACT_SUCCESS;
 #endif
 		/*
@@ -727,12 +727,6 @@ static inline void set_page_order(struct page *page, unsigned int order)
 	__SetPageBuddy(page);
 }
 
-static inline void rmv_page_order(struct page *page)
-{
-	__ClearPageBuddy(page);
-	set_page_private(page, 0);
-}
-
 /*
  * This function checks whether a page is free && is the buddy
  * we can coalesce a page and its buddy if
@@ -833,13 +827,10 @@ static inline void __free_one_page(struct page *page,
 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
 		 * merge with it and move up one order.
 		 */
-		if (page_is_guard(buddy)) {
+		if (page_is_guard(buddy))
 			clear_page_guard(zone, buddy, order, migratetype);
-		} else {
-			list_del(&buddy->lru);
-			zone->free_area[order].nr_free--;
-			rmv_page_order(buddy);
-		}
+		else
+			del_page_from_free_area(buddy, &zone->free_area[order]);
 		combined_pfn = buddy_pfn & pfn;
 		page = page + (combined_pfn - pfn);
 		pfn = combined_pfn;
@@ -889,15 +880,13 @@ static inline void __free_one_page(struct page *page,
 			higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 			if (pfn_valid_within(buddy_pfn) &&
 			    page_is_buddy(higher_page, higher_buddy, order + 1)) {
-				list_add_tail(&page->lru,
-					&zone->free_area[order].free_list[migratetype]);
-				goto out;
+				add_to_free_area_tail(page, &zone->free_area[order],
+						      migratetype);
+				return;
 			}
 		}
 
-	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
-out:
-	zone->free_area[order].nr_free++;
+	add_to_free_area(page, &zone->free_area[order], migratetype);
 }
 
 /*
@@ -1918,8 +1907,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		if (set_page_guard(zone, &page[size], high, migratetype))
 			continue;
 
-		list_add(&page[size].lru, &area->free_list[migratetype]);
-		area->nr_free++;
+		add_to_free_area(&page[size], area, migratetype);
 		set_page_order(&page[size], high);
 	}
 }
@@ -2060,13 +2048,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	/* Find a page of the appropriate size in the preferred list */
 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 		area = &(zone->free_area[current_order]);
-		page = list_first_entry_or_null(&area->free_list[migratetype],
-							struct page, lru);
+		page = get_page_from_free_area(area, migratetype);
 		if (!page)
 			continue;
-		list_del(&page->lru);
-		rmv_page_order(page);
-		area->nr_free--;
+		del_page_from_free_area(page, area);
 		expand(zone, page, order, current_order, area, migratetype);
 		set_pcppage_migratetype(page, migratetype);
 		return page;
@@ -2156,8 +2141,7 @@ static int move_freepages(struct zone *zone,
 		}
 
 		order = page_order(page);
-		list_move(&page->lru,
-			  &zone->free_area[order].free_list[migratetype]);
+		move_to_free_area(page, &zone->free_area[order], migratetype);
 		page += 1 << order;
 		pages_moved += 1 << order;
 	}
@@ -2306,7 +2290,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 
 single_page:
 	area = &zone->free_area[current_order];
-	list_move(&page->lru, &area->free_list[start_type]);
+	move_to_free_area(page, area, start_type);
 }
 
 /*
@@ -2330,7 +2314,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (fallback_mt == MIGRATE_TYPES)
 			break;
 
-		if (list_empty(&area->free_list[fallback_mt]))
+		if (free_area_empty(area, fallback_mt))
 			continue;
 
 		if (can_steal_fallback(order, migratetype))
@@ -2417,9 +2401,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
 
-			page = list_first_entry_or_null(
-					&area->free_list[MIGRATE_HIGHATOMIC],
-					struct page, lru);
+			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
 			if (!page)
 				continue;
 
@@ -2532,8 +2514,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 	VM_BUG_ON(current_order == MAX_ORDER);
 
 do_steal:
-	page = list_first_entry(&area->free_list[fallback_mt],
-							struct page, lru);
+	page = get_page_from_free_area(area, fallback_mt);
 
 	steal_suitable_fallback(zone, page, start_migratetype, can_steal);
 
@@ -2960,6 +2941,7 @@ EXPORT_SYMBOL_GPL(split_page);
 
 int __isolate_free_page(struct page *page, unsigned int order)
 {
+	struct free_area *area = &page_zone(page)->free_area[order];
 	unsigned long watermark;
 	struct zone *zone;
 	int mt;
@@ -2984,9 +2966,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	}
 
 	/* Remove page from free list */
-	list_del(&page->lru);
-	zone->free_area[order].nr_free--;
-	rmv_page_order(page);
+
+	del_page_from_free_area(page, area);
 
 	/*
 	 * Set the pageblock if the isolated page is at least half of a
@@ -3294,13 +3275,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 			continue;
 
 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
-			if (!list_empty(&area->free_list[mt]))
+			if (!free_area_empty(area, mt))
 				return true;
 		}
 
 #ifdef CONFIG_CMA
 		if ((alloc_flags & ALLOC_CMA) &&
-		    !list_empty(&area->free_list[MIGRATE_CMA])) {
+		    !free_area_empty(area, MIGRATE_CMA)) {
 			return true;
 		}
 #endif
@@ -5153,7 +5134,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			types[order] = 0;
 
 			for (type = 0; type < MIGRATE_TYPES; type++) {
-				if (!list_empty(&area->free_list[type]))
+				if (!free_area_empty(area, type))
 					types[order] |= 1 << type;
 			}
 		}
@@ -8207,9 +8188,7 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		pr_info("remove from free list %lx %d %lx\n",
 			pfn, 1 << order, end_pfn);
 #endif
-		list_del(&page->lru);
-		rmv_page_order(page);
-		zone->free_area[order].nr_free--;
+		del_page_from_free_area(page, &zone->free_area[order]);
 		for (i = 0; i < (1 << order); i++)
 			SetPageReserved((page+i));
 		pfn += (1 << order);