Commit 9e60109f authored by Peter Zijlstra, committed by Linus Torvalds

mm: rename drop_anon_vma() to put_anon_vma()

The normal code pattern used in the kernel is: get/put.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 7bc32f6f
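For reference, get/put is the kernel's standard naming for a reference-counting pair: get() takes a reference, put() drops one and frees the object when the count falls to zero. The snippet below is only a minimal plain-C11 sketch of that convention, using a made-up refcounted_object type and helper names; it is not the anon_vma code this commit touches.

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical refcounted object, only to illustrate the get/put pattern. */
struct refcounted_object {
	atomic_int refcount;
	/* ... payload ... */
};

static struct refcounted_object *object_alloc(void)
{
	struct refcounted_object *obj = calloc(1, sizeof(*obj));

	if (obj)
		atomic_init(&obj->refcount, 1);	/* creator holds the first reference */
	return obj;
}

/* get: take an extra reference on an object someone else keeps alive. */
static void object_get(struct refcounted_object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);
}

/* put: drop a reference; the last put frees the object. */
static void object_put(struct refcounted_object *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1)
		free(obj);
}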
@@ -87,7 +87,7 @@ static inline void get_anon_vma(struct anon_vma *anon_vma)
 	atomic_inc(&anon_vma->external_refcount);
 }
 
-void drop_anon_vma(struct anon_vma *);
+void put_anon_vma(struct anon_vma *);
 #else
 static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
@@ -102,7 +102,7 @@ static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
 }
 
-static inline void drop_anon_vma(struct anon_vma *anon_vma)
+static inline void put_anon_vma(struct anon_vma *anon_vma)
 {
 }
 #endif /* CONFIG_KSM */
@@ -301,20 +301,6 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
 	return rmap_item->address & STABLE_FLAG;
 }
 
-static void hold_anon_vma(struct rmap_item *rmap_item,
-			  struct anon_vma *anon_vma)
-{
-	rmap_item->anon_vma = anon_vma;
-	get_anon_vma(anon_vma);
-}
-
-static void ksm_drop_anon_vma(struct rmap_item *rmap_item)
-{
-	struct anon_vma *anon_vma = rmap_item->anon_vma;
-
-	drop_anon_vma(anon_vma);
-}
-
 /*
  * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
  * page tables after it has passed through ksm_exit() - which, if necessary,
@@ -397,7 +383,7 @@ static void break_cow(struct rmap_item *rmap_item)
 	 * It is not an accident that whenever we want to break COW
 	 * to undo, we also need to drop a reference to the anon_vma.
 	 */
-	ksm_drop_anon_vma(rmap_item);
+	put_anon_vma(rmap_item->anon_vma);
 
 	down_read(&mm->mmap_sem);
 	if (ksm_test_exit(mm))
@@ -466,7 +452,7 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
 			ksm_pages_sharing--;
 		else
 			ksm_pages_shared--;
-		ksm_drop_anon_vma(rmap_item);
+		put_anon_vma(rmap_item->anon_vma);
 		rmap_item->address &= PAGE_MASK;
 		cond_resched();
 	}
@@ -554,7 +540,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		else
 			ksm_pages_shared--;
 
-		ksm_drop_anon_vma(rmap_item);
+		put_anon_vma(rmap_item->anon_vma);
 		rmap_item->address &= PAGE_MASK;
 
 	} else if (rmap_item->address & UNSTABLE_FLAG) {
@@ -949,7 +935,8 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
 		goto out;
 
 	/* Must get reference to anon_vma while still holding mmap_sem */
-	hold_anon_vma(rmap_item, vma->anon_vma);
+	rmap_item->anon_vma = vma->anon_vma;
+	get_anon_vma(vma->anon_vma);
 out:
 	up_read(&mm->mmap_sem);
 	return err;
@@ -764,7 +764,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
-		drop_anon_vma(anon_vma);
+		put_anon_vma(anon_vma);
 
 uncharge:
 	if (!charge)
@@ -856,7 +856,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	remove_migration_ptes(hpage, hpage);
 
 	if (anon_vma)
-		drop_anon_vma(anon_vma);
+		put_anon_vma(anon_vma);
 
 out:
 	unlock_page(hpage);
@@ -278,7 +278,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	if (empty) {
 		/* We no longer need the root anon_vma */
 		if (anon_vma->root != anon_vma)
-			drop_anon_vma(anon_vma->root);
+			put_anon_vma(anon_vma->root);
 		anon_vma_free(anon_vma);
 	}
 }
@@ -1493,7 +1493,7 @@ int try_to_munlock(struct page *page)
  * we know we are the last user, nobody else can get a reference and we
  * can do the freeing without the lock.
  */
-void drop_anon_vma(struct anon_vma *anon_vma)
+void put_anon_vma(struct anon_vma *anon_vma)
 {
 	BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0);
 	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
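In the last hunk, the renamed put_anon_vma() drops the external refcount with atomic_dec_and_lock(), which only takes anon_vma->root->lock when the count reaches zero, so the final teardown runs with the lock held. The sketch below is a rough, hypothetical user-space analogue of that decrement-and-lock idea in C11, with a pthread mutex standing in for the kernel spinlock; it is not the kernel's implementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/*
 * Hypothetical analogue of the pattern used by put_anon_vma(): drop a
 * reference without the lock while we cannot be the last user, and only
 * take the lock when the count may hit zero.
 */
static bool dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
{
	int old = atomic_load(count);

	/* Fast path: decrement lock-free while the count stays above one. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return false;	/* not the last reference */
	}

	/* Slow path: we may be last, so take the lock before the final decrement. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(count, 1) == 1)
		return true;	/* last reference; caller tears down, then unlocks */
	pthread_mutex_unlock(lock);
	return false;
}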