diff --git a/mm/migrate.c b/mm/migrate.c
index 298c56e334cd400c70e88227f77feb9397bb64e8..90aa493faa60258ac7284b6a86d2559672cadcb1 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -443,6 +443,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	int dirty;
 	int expected_count = 1 + extra_count;
 	void **pslot;
+	int nr = hpage_nr_pages(page);
 
 	/*
 	 * Device public or private pages have an extra refcount as they are
@@ -506,7 +507,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
+	page_ref_add(newpage, nr); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -529,7 +530,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		int i;
 		int index = page_index(page);
 
-		for (i = 1; i < HPAGE_PMD_NR; i++) {
+		for (i = 1; i < nr; i++) {
 			pslot = radix_tree_lookup_slot(&mapping->i_pages,
 						       index + i);
 			radix_tree_replace_slot(&mapping->i_pages, pslot,
@@ -542,7 +543,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
+	page_ref_unfreeze(page, expected_count - nr);
 
 	xa_unlock(&mapping->i_pages);
 	/* Leave irq disabled to prevent preemption while updating stats */
@@ -558,17 +559,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 * are mapped to swap space.
 	 */
 	if (newzone != oldzone) {
-		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
-		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
+		__mod_node_page_state(oldzone->zone_pgdat, NR_FILE_PAGES, -nr);
+		__mod_node_page_state(newzone->zone_pgdat, NR_FILE_PAGES, nr);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
-			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
+			__mod_node_page_state(oldzone->zone_pgdat, NR_SHMEM, -nr);
+			__mod_node_page_state(newzone->zone_pgdat, NR_SHMEM, nr);
 		}
 		if (dirty && mapping_cap_account_dirty(mapping)) {
-			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
-			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
-			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+			__mod_node_page_state(oldzone->zone_pgdat, NR_FILE_DIRTY, -nr);
+			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+			__mod_node_page_state(newzone->zone_pgdat, NR_FILE_DIRTY, nr);
+			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
 		}
 	}
 	local_irq_enable();