diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0378f758b0659f1c36d213e09a3f22a6c2ba3baa..87dbb0fcfa1a5fa9f13734274ae1a995a9b5b106 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1537,8 +1537,10 @@ static void collapse_shmem(struct mm_struct *mm,
 		khugepaged_pages_collapsed++;
 	} else {
 		/* Something went wrong: rollback changes to the radix-tree */
-		shmem_uncharge(mapping->host, nr_none);
 		xa_lock_irq(&mapping->i_pages);
+		mapping->nrpages -= nr_none;
+		shmem_uncharge(mapping->host, nr_none);
+
 		radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
 			if (iter.index >= end)
 				break;
diff --git a/mm/shmem.c b/mm/shmem.c
index 38d228a30fdc56ddb708390633440711c54e4012..cd6b4bc221ebc4e02bc74c0ed0758a569482658a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -297,12 +297,14 @@ bool shmem_charge(struct inode *inode, long pages)
 	if (!shmem_inode_acct_block(inode, pages))
 		return false;
 
+	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
+	inode->i_mapping->nrpages += pages;
+
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced += pages;
 	inode->i_blocks += pages * BLOCKS_PER_PAGE;
 	shmem_recalc_inode(inode);
 	spin_unlock_irqrestore(&info->lock, flags);
-	inode->i_mapping->nrpages += pages;
 
 	return true;
 }
@@ -312,6 +314,8 @@ void shmem_uncharge(struct inode *inode, long pages)
 	struct shmem_inode_info *info = SHMEM_I(inode);
 	unsigned long flags;
 
+	/* nrpages adjustment done by __delete_from_page_cache() or caller */
+
 	spin_lock_irqsave(&info->lock, flags);
 	info->alloced -= pages;
 	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
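
Note on the ordering the patch enforces: shmem_recalc_inode() treats pages
charged in info->alloced but present in neither swap nor the page cache as
reclaimable holes, so i_mapping->nrpages must already reflect the new pages
before it runs, or the fresh charge is immediately "reclaimed" as a phantom
hole. Below is a minimal userspace sketch of that invariant, not kernel code:
the struct and recalc_freed() are illustrative stand-ins mirroring the shape
of the kernel's freed = alloced - swapped - nrpages computation.

#include <assert.h>
#include <stdio.h>

struct shmem_sketch {
	long alloced;	/* pages charged to the inode */
	long swapped;	/* pages currently out in swap */
	long nrpages;	/* pages present in the page cache */
};

/* Models the "freed" (hole) computation in shmem_recalc_inode(). */
static long recalc_freed(const struct shmem_sketch *s)
{
	return s->alloced - s->swapped - s->nrpages;
}

int main(void)
{
	struct shmem_sketch s = { 0, 0, 0 };

	/* Old order: charge first, bump nrpages afterwards. */
	s.alloced += 512;
	assert(recalc_freed(&s) == 512);	/* 512 phantom holes */
	s.nrpages += 512;

	/* Patched order: nrpages first, so the charge is balanced. */
	s.alloced = s.swapped = s.nrpages = 0;
	s.nrpages += 512;
	s.alloced += 512;
	assert(recalc_freed(&s) == 0);		/* no phantom holes */

	puts("accounting balanced");
	return 0;
}

The same reasoning explains the khugepaged.c hunk: on rollback,
mapping->nrpages is dropped under the xarray lock before shmem_uncharge()
runs, so the recalculation never observes an inconsistent intermediate state.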