提交 75f6d6d2 编写于 作者: Minchan Kim 提交者: Linus Torvalds

mm, THP, swap: unify swap slot free functions to put_swap_page

Now, get_swap_page takes a struct page and allocates swap space according
to the page size (i.e., normal or THP), so it is cleaner to introduce
put_swap_page, which is the counterpart of get_swap_page.  It then
calls the right swap slot free function depending on the page's size.

[ying.huang@intel.com: minor cleanup and fix]
Link: http://lkml.kernel.org/r/20170515112522.32457-3-ying.huang@intel.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 38d8b4e6
...@@ -387,6 +387,7 @@ static inline long get_nr_swap_pages(void) ...@@ -387,6 +387,7 @@ static inline long get_nr_swap_pages(void)
extern void si_swapinfo(struct sysinfo *); extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page); extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int); extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]); extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern int add_swap_count_continuation(swp_entry_t, gfp_t);
...@@ -394,7 +395,6 @@ extern void swap_shmem_alloc(swp_entry_t); ...@@ -394,7 +395,6 @@ extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t); extern int swap_duplicate(swp_entry_t);
extern int swapcache_prepare(swp_entry_t); extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t); extern void swap_free(swp_entry_t);
extern void swapcache_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t); extern int free_swap_and_cache(swp_entry_t);
extern int swap_type_of(dev_t, sector_t, struct block_device **); extern int swap_type_of(dev_t, sector_t, struct block_device **);
...@@ -453,7 +453,7 @@ static inline void swap_free(swp_entry_t swp) ...@@ -453,7 +453,7 @@ static inline void swap_free(swp_entry_t swp)
{ {
} }
static inline void swapcache_free(swp_entry_t swp) static inline void put_swap_page(struct page *page, swp_entry_t swp)
{ {
} }
...@@ -578,13 +578,5 @@ static inline bool mem_cgroup_swap_full(struct page *page) ...@@ -578,13 +578,5 @@ static inline bool mem_cgroup_swap_full(struct page *page)
} }
#endif #endif
#ifdef CONFIG_THP_SWAP
extern void swapcache_free_cluster(swp_entry_t entry);
#else
static inline void swapcache_free_cluster(swp_entry_t entry)
{
}
#endif
#endif /* __KERNEL__*/ #endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */ #endif /* _LINUX_SWAP_H */
...@@ -1327,7 +1327,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc) ...@@ -1327,7 +1327,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
mutex_unlock(&shmem_swaplist_mutex); mutex_unlock(&shmem_swaplist_mutex);
free_swap: free_swap:
swapcache_free(swap); put_swap_page(page, swap);
redirty: redirty:
set_page_dirty(page); set_page_dirty(page);
if (wbc->for_reclaim) if (wbc->for_reclaim)
......
...@@ -231,10 +231,7 @@ int add_to_swap(struct page *page, struct list_head *list) ...@@ -231,10 +231,7 @@ int add_to_swap(struct page *page, struct list_head *list)
return 1; return 1;
fail_free: fail_free:
if (PageTransHuge(page)) put_swap_page(page, entry);
swapcache_free_cluster(entry);
else
swapcache_free(entry);
fail: fail:
if (PageTransHuge(page) && !split_huge_page_to_list(page, list)) if (PageTransHuge(page) && !split_huge_page_to_list(page, list))
goto retry; goto retry;
...@@ -259,11 +256,7 @@ void delete_from_swap_cache(struct page *page) ...@@ -259,11 +256,7 @@ void delete_from_swap_cache(struct page *page)
__delete_from_swap_cache(page); __delete_from_swap_cache(page);
spin_unlock_irq(&address_space->tree_lock); spin_unlock_irq(&address_space->tree_lock);
if (PageTransHuge(page)) put_swap_page(page, entry);
swapcache_free_cluster(entry);
else
swapcache_free(entry);
page_ref_sub(page, hpage_nr_pages(page)); page_ref_sub(page, hpage_nr_pages(page));
} }
...@@ -415,7 +408,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, ...@@ -415,7 +408,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
* add_to_swap_cache() doesn't return -EEXIST, so we can safely * add_to_swap_cache() doesn't return -EEXIST, so we can safely
* clear SWAP_HAS_CACHE flag. * clear SWAP_HAS_CACHE flag.
*/ */
swapcache_free(entry); put_swap_page(new_page, entry);
} while (err != -ENOMEM); } while (err != -ENOMEM);
if (new_page) if (new_page)
......
...@@ -1148,7 +1148,7 @@ void swap_free(swp_entry_t entry) ...@@ -1148,7 +1148,7 @@ void swap_free(swp_entry_t entry)
/* /*
* Called after dropping swapcache to decrease refcnt to swap entries. * Called after dropping swapcache to decrease refcnt to swap entries.
*/ */
void swapcache_free(swp_entry_t entry) static void swapcache_free(swp_entry_t entry)
{ {
struct swap_info_struct *p; struct swap_info_struct *p;
...@@ -1160,7 +1160,7 @@ void swapcache_free(swp_entry_t entry) ...@@ -1160,7 +1160,7 @@ void swapcache_free(swp_entry_t entry)
} }
#ifdef CONFIG_THP_SWAP #ifdef CONFIG_THP_SWAP
void swapcache_free_cluster(swp_entry_t entry) static void swapcache_free_cluster(swp_entry_t entry)
{ {
unsigned long offset = swp_offset(entry); unsigned long offset = swp_offset(entry);
unsigned long idx = offset / SWAPFILE_CLUSTER; unsigned long idx = offset / SWAPFILE_CLUSTER;
...@@ -1184,8 +1184,20 @@ void swapcache_free_cluster(swp_entry_t entry) ...@@ -1184,8 +1184,20 @@ void swapcache_free_cluster(swp_entry_t entry)
swap_free_cluster(si, idx); swap_free_cluster(si, idx);
spin_unlock(&si->lock); spin_unlock(&si->lock);
} }
#else
static inline void swapcache_free_cluster(swp_entry_t entry)
{
}
#endif /* CONFIG_THP_SWAP */ #endif /* CONFIG_THP_SWAP */
/*
 * put_swap_page - release the swap slot(s) allocated for a page
 * @page:  page whose swap space is being returned
 * @entry: first swap entry that was allocated for @page
 *
 * Counterpart of get_swap_page(): dispatches to the slot-freeing
 * routine that matches the page's size — whole-cluster free for a
 * transparent huge page, single-slot free otherwise.
 */
void put_swap_page(struct page *page, swp_entry_t entry)
{
	if (PageTransHuge(page))
		swapcache_free_cluster(entry);
	else
		swapcache_free(entry);
}
void swapcache_free_entries(swp_entry_t *entries, int n) void swapcache_free_entries(swp_entry_t *entries, int n)
{ {
struct swap_info_struct *p, *prev; struct swap_info_struct *p, *prev;
......
...@@ -708,7 +708,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, ...@@ -708,7 +708,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
mem_cgroup_swapout(page, swap); mem_cgroup_swapout(page, swap);
__delete_from_swap_cache(page); __delete_from_swap_cache(page);
spin_unlock_irqrestore(&mapping->tree_lock, flags); spin_unlock_irqrestore(&mapping->tree_lock, flags);
swapcache_free(swap); put_swap_page(page, swap);
} else { } else {
void (*freepage)(struct page *); void (*freepage)(struct page *);
void *shadow = NULL; void *shadow = NULL;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册