diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b9b23ddca63af8da20f42a4a84764dc0e874764f..fd56111a52c1ca4e02c2626f5e6387da7f7bff9f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -27,18 +27,15 @@
 struct anon_vma {
 	struct anon_vma *root;	/* Root of this anon_vma tree */
 	spinlock_t lock;	/* Serialize access to vma list */
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-
 	/*
-	 * The external_refcount is taken by either KSM or page migration
-	 * to take a reference to an anon_vma when there is no
+	 * The refcount is taken on an anon_vma when there is no
 	 * guarantee that the vma of page tables will exist for
 	 * the duration of the operation. A caller that takes
 	 * the reference is responsible for clearing up the
 	 * anon_vma if they are the last user on release
 	 */
-	atomic_t external_refcount;
-#endif
+	atomic_t refcount;
+
 	/*
 	 * NOTE: the LSB of the head.next is set by
 	 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -71,41 +68,12 @@ struct anon_vma_chain {
 };
 
 #ifdef CONFIG_MMU
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-	atomic_set(&anon_vma->external_refcount, 0);
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return atomic_read(&anon_vma->external_refcount);
-}
-
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
-	atomic_inc(&anon_vma->external_refcount);
+	atomic_inc(&anon_vma->refcount);
 }
 
 void put_anon_vma(struct anon_vma *);
-#else
-static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
-{
-}
-
-static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
-{
-	return 0;
-}
-
-static inline void get_anon_vma(struct anon_vma *anon_vma)
-{
-}
-
-static inline void put_anon_vma(struct anon_vma *anon_vma)
-{
-}
-#endif /* CONFIG_KSM */
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
 {
diff --git a/mm/rmap.c b/mm/rmap.c
index ad416afb2061fb4eb6b85a0b5be907c11690a211..873cd9ef912c3b72013af094fe2f6c365abfc77f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -272,7 +272,7 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	list_del(&anon_vma_chain->same_anon_vma);
 
 	/* We must garbage collect the anon_vma if it's empty */
-	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
+	empty = list_empty(&anon_vma->head) && !atomic_read(&anon_vma->refcount);
 	anon_vma_unlock(anon_vma);
 
 	if (empty) {
@@ -303,7 +303,7 @@ static void anon_vma_ctor(void *data)
 	struct anon_vma *anon_vma = data;
 
 	spin_lock_init(&anon_vma->lock);
-	anonvma_external_refcount_init(anon_vma);
+	atomic_set(&anon_vma->refcount, 0);
 	INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -1486,7 +1486,6 @@ int try_to_munlock(struct page *page)
 		return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
-#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
 /*
  * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
  * if necessary. Be careful to do all the tests under the lock. Once
@@ -1495,8 +1494,8 @@ int try_to_munlock(struct page *page)
  */
 void put_anon_vma(struct anon_vma *anon_vma)
 {
-	BUG_ON(atomic_read(&anon_vma->external_refcount) <= 0);
-	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
+	BUG_ON(atomic_read(&anon_vma->refcount) <= 0);
+	if (atomic_dec_and_lock(&anon_vma->refcount, &anon_vma->root->lock)) {
 		struct anon_vma *root = anon_vma->root;
 		int empty = list_empty(&anon_vma->head);
 		int last_root_user = 0;
@@ -1507,8 +1506,8 @@ void put_anon_vma(struct anon_vma *anon_vma)
 		 * the refcount on the root and check if we need to free it.
 		 */
 		if (empty && anon_vma != root) {
-			BUG_ON(atomic_read(&root->external_refcount) <= 0);
-			last_root_user = atomic_dec_and_test(&root->external_refcount);
+			BUG_ON(atomic_read(&root->refcount) <= 0);
+			last_root_user = atomic_dec_and_test(&root->refcount);
 			root_empty = list_empty(&root->head);
 		}
 		anon_vma_unlock(anon_vma);
@@ -1520,7 +1519,6 @@ void put_anon_vma(struct anon_vma *anon_vma)
 		}
 	}
 }
-#endif
 
 #ifdef CONFIG_MIGRATION
 /*
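
For context (not part of the patch): a minimal sketch of how a caller is expected to use the now-unconditional refcount. Only get_anon_vma(), put_anon_vma() and page_anon_vma() come from the interfaces touched above; the helper name pin_anon_vma_example() and the elided locking are hypothetical.

/*
 * Illustrative sketch only -- not part of this patch. The caller must
 * already hold whatever serializes against the anon_vma being torn down
 * (e.g. the page lock); that locking is elided here.
 */
#include <linux/mm.h>
#include <linux/rmap.h>

static void pin_anon_vma_example(struct page *page)
{
	struct anon_vma *anon_vma;

	anon_vma = page_anon_vma(page);	/* NULL for pages that are not anon-mapped */
	if (!anon_vma)
		return;

	/* Pin the anon_vma across an operation that may outlive the mapping VMAs */
	get_anon_vma(anon_vma);

	/* ... work on the page while the anon_vma cannot be freed ... */

	/* Drop the reference; the last user frees the anon_vma */
	put_anon_vma(anon_vma);
}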