diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index acd6f9cb01bd59c3e16e8930e6d965a6f0e6a7b5..70394cabaf4e0fe6efc3c18701796bb04c71168b 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -116,6 +116,18 @@ static inline void add_to_free_area_tail(struct page *page, struct free_area *ar
 	area->nr_free++;
 }
 
+#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
+/* Used to preserve page allocation order entropy */
+void add_to_free_area_random(struct page *page, struct free_area *area,
+		int migratetype);
+#else
+static inline void add_to_free_area_random(struct page *page,
+		struct free_area *area, int migratetype)
+{
+	add_to_free_area(page, area, migratetype);
+}
+#endif
+
 /* Used for pages which are on another list */
 static inline void move_to_free_area(struct page *page, struct free_area *area,
 			     int migratetype)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b674625762c458df0e2465dc87ae0c2c743a6527..3b13d39141760edf95ab019e4ad52abea83e3b33 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -958,7 +959,8 @@ static inline void __free_one_page(struct page *page,
 	 * so it's less likely to be used soon and more likely to be merged
 	 * as a higher order page
 	 */
-	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
+	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
+			&& !is_shuffle_order(order)) {
 		struct page *higher_page, *higher_buddy;
 		combined_pfn = buddy_pfn & pfn;
 		higher_page = page + (combined_pfn - pfn);
@@ -972,7 +974,12 @@ static inline void __free_one_page(struct page *page,
 		}
 	}
 
-	add_to_free_area(page, &zone->free_area[order], migratetype);
+	if (is_shuffle_order(order))
+		add_to_free_area_random(page, &zone->free_area[order],
+				migratetype);
+	else
+		add_to_free_area(page, &zone->free_area[order], migratetype);
+
 }
 
 /*
diff --git a/mm/shuffle.c b/mm/shuffle.c
index bc0419a61fbe1aa1d1fcd2f734ec38d1eb9f4a45..3ce12481b1dccfb01112ca9b168a442cc81d5d4f 100644
--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ -182,3 +182,26 @@ void __meminit __shuffle_free_memory(pg_data_t *pgdat)
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
 		shuffle_zone(z);
 }
+
+void add_to_free_area_random(struct page *page, struct free_area *area,
+		int migratetype)
+{
+	static u64 rand;
+	static u8 rand_bits;
+
+	/*
+	 * The lack of locking is deliberate. If 2 threads race to
+	 * update the rand state it just adds to the entropy.
+	 */
+	if (rand_bits == 0) {
+		rand_bits = 64;
+		rand = get_random_u64();
+	}
+
+	if (rand & 1)
+		add_to_free_area(page, area, migratetype);
+	else
+		add_to_free_area_tail(page, area, migratetype);
+	rand_bits--;
+	rand >>= 1;
+}
diff --git a/mm/shuffle.h b/mm/shuffle.h
index 644c8ee97b9e9fb6f337b6d96cc07345e040b4f0..777a257a0d2f9346f10e68db9da34789fd5a3711 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -36,6 +36,13 @@ static inline void shuffle_zone(struct zone *z)
 		return;
 	__shuffle_zone(z);
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	if (!static_branch_unlikely(&page_alloc_shuffle_key))
+		return false;
+	return order >= SHUFFLE_ORDER;
+}
 #else
 static inline void shuffle_free_memory(pg_data_t *pgdat)
 {
@@ -48,5 +55,10 @@ static inline void shuffle_zone(struct zone *z)
 {
 }
 
 static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
 {
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	return false;
+}
 #endif
 #endif /* _MM_SHUFFLE_H */
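
Aside, not part of the patch: add_to_free_area_random() above amortizes one get_random_u64() call across 64 freelist-placement decisions by caching the value in static state and consuming a single bit per call. Below is a minimal userspace sketch of the same bit-batching idea; getentropy() stands in for get_random_u64(), and the names (random_bit(), the head/tail counters) are illustrative, not taken from the kernel code.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>	/* getentropy(), glibc >= 2.25 */

static uint64_t rand_cache;	/* batched random bits */
static uint8_t rand_bits;	/* bits left in the cache */

/* Hand out one random bit, refilling the 64-bit cache when it runs dry. */
static int random_bit(void)
{
	int bit;

	if (rand_bits == 0) {
		rand_bits = 64;
		getentropy(&rand_cache, sizeof(rand_cache));
	}
	bit = rand_cache & 1;
	rand_bits--;
	rand_cache >>= 1;
	return bit;
}

int main(void)
{
	int head = 0, tail = 0;

	/* Split 1024 simulated frees between list head and tail at random. */
	for (int i = 0; i < 1024; i++)
		random_bit() ? head++ : tail++;
	printf("head: %d, tail: %d\n", head, tail);
	return 0;
}

As the patch's own comment notes, the kernel version deliberately leaves the shared rand/rand_bits state unlocked: a race between two freeing CPUs can only scramble the bit stream further, which is harmless when the sole purpose of the state is unpredictability.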