diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1d76f8ca90f014d6bef14006f820166e33f3e9f7..ee1c244a62a141f5cfa06e09ee6a381d512e23ad 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -113,7 +113,7 @@ extern void __split_huge_page_pmd(struct vm_area_struct *vma,
 	do {						\
 		pmd_t *____pmd = (__pmd);		\
 		anon_vma_lock_write(__anon_vma);	\
-		anon_vma_unlock(__anon_vma);		\
+		anon_vma_unlock_write(__anon_vma);	\
 		BUG_ON(pmd_trans_splitting(*____pmd) ||	\
 		       pmd_trans_huge(*____pmd));	\
 	} while (0)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c20635c527a9071c35d804157b527d199188800d..6dacb93a6d94a3f710706b6ed2e16a3fa5ad5e4e 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -123,7 +123,7 @@ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 	down_write(&anon_vma->root->rwsem);
 }
 
-static inline void anon_vma_unlock(struct anon_vma *anon_vma)
+static inline void anon_vma_unlock_write(struct anon_vma *anon_vma)
 {
 	up_write(&anon_vma->root->rwsem);
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f40b2ce23d60a047f518743483b3f08c60ec4fee..b1cc6591ed8341bbf5183d5c89aafd60d9fd77c1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1830,7 +1830,7 @@ int split_huge_page(struct page *page)
 
 	BUG_ON(PageCompound(page));
 out_unlock:
-	anon_vma_unlock(anon_vma);
+	anon_vma_unlock_write(anon_vma);
 	put_anon_vma(anon_vma);
 out:
 	return ret;
@@ -2322,7 +2322,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		BUG_ON(!pmd_none(*pmd));
 		set_pmd_at(mm, address, pmd, _pmd);
 		spin_unlock(&mm->page_table_lock);
-		anon_vma_unlock(vma->anon_vma);
+		anon_vma_unlock_write(vma->anon_vma);
 		goto out;
 	}
 
@@ -2330,7 +2330,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * All pages are isolated and locked so anon_vma rmap
 	 * can't run anymore.
 	 */
-	anon_vma_unlock(vma->anon_vma);
+	anon_vma_unlock_write(vma->anon_vma);
 
 	__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
 	pte_unmap(pte);
diff --git a/mm/mmap.c b/mm/mmap.c
index 28416f6b8dd5d821119eca260612cc874d83f1b5..318e121affda3fbe5380a4049e2548b18bd45a52 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -809,7 +809,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		anon_vma_interval_tree_post_update_vma(vma);
 		if (adjust_next)
 			anon_vma_interval_tree_post_update_vma(next);
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_write(anon_vma);
 	}
 	if (mapping)
 		mutex_unlock(&mapping->i_mmap_mutex);
@@ -3017,7 +3017,7 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 		if (!__test_and_clear_bit(0, (unsigned long *)
 					  &anon_vma->root->rb_root.rb_node))
 			BUG();
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_write(anon_vma);
 	}
 }
 
diff --git a/mm/mremap.c b/mm/mremap.c
index ebe27a8efa62555fd823fe8ffcdc7f1be0e407da..463a25705ac6d4ca38fb202b30b2ccae9161ce5b 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -135,7 +135,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
 	if (anon_vma)
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_write(anon_vma);
 	if (mapping)
 		mutex_unlock(&mapping->i_mmap_mutex);
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 3d38edffda410680d3da0b54594b0e643fa924e9..807c96bf0dc6d08cdc167ce432a68999b1e21692 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -105,7 +105,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 	 */
 	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
 		anon_vma_lock_write(anon_vma);
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_write(anon_vma);
 	}
 
 	kmem_cache_free(anon_vma_cachep, anon_vma);
@@ -191,7 +191,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 			avc = NULL;
 		}
 		spin_unlock(&mm->page_table_lock);
-		anon_vma_unlock(anon_vma);
+		anon_vma_unlock_write(anon_vma);
 
 		if (unlikely(allocated))
 			put_anon_vma(allocated);
@@ -308,7 +308,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	vma->anon_vma = anon_vma;
 	anon_vma_lock_write(anon_vma);
 	anon_vma_chain_link(vma, avc, anon_vma);
-	anon_vma_unlock(anon_vma);
+	anon_vma_unlock_write(anon_vma);
 
 	return 0;