diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 9d0c454d23cd626107aecfd184f1e446f4d763b1..5d2a3458decc4f274c4b4eb3637ed81aaa23ef2c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -476,6 +476,9 @@ struct zone { * bootmem allocator): * managed_pages = present_pages - reserved_pages; * + * cma_pages is the number of present pages that are assigned for CMA use + * (MIGRATE_CMA). + * * So present_pages may be used by memory hotplug or memory power * management logic to figure out unmanaged pages by checking * (present_pages - managed_pages). And managed_pages should be used @@ -500,6 +503,9 @@ struct zone { atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; +#ifdef CONFIG_CMA + unsigned long cma_pages; +#endif const char *name; @@ -597,6 +603,15 @@ static inline unsigned long zone_managed_pages(struct zone *zone) return (unsigned long)atomic_long_read(&zone->managed_pages); } +static inline unsigned long zone_cma_pages(struct zone *zone) +{ +#ifdef CONFIG_CMA + return zone->cma_pages; +#else + return 0; +#endif +} + static inline unsigned long zone_end_pfn(const struct zone *zone) { return zone->zone_start_pfn + zone->spanned_pages; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c72ecf5c9e3691c6745546f9081908978074bf7c..242f799fe323a190d003a5e3b3e981c1cd77fb62 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2143,6 +2143,7 @@ void __init init_cma_reserved_pageblock(struct page *page) } adjust_managed_page_count(page, pageblock_nr_pages); + page_zone(page)->cma_pages += pageblock_nr_pages; } #endif diff --git a/mm/vmstat.c b/mm/vmstat.c index 698bc0bc18d146942151348bac4012dea31b09bb..506cab48841013d4b0a3925ad7e4cbafeca08e90 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1634,14 +1634,16 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, "\n high %lu" "\n spanned %lu" "\n present %lu" - "\n managed %lu", + "\n managed %lu" + "\n cma %lu", zone_page_state(zone, NR_FREE_PAGES), 
min_wmark_pages(zone), low_wmark_pages(zone), high_wmark_pages(zone), zone->spanned_pages, zone->present_pages, - zone_managed_pages(zone)); + zone_managed_pages(zone), + zone_cma_pages(zone)); seq_printf(m, "\n protection: (%ld",