提交 a448f2d0 编写于 作者: H Huang Ying 提交者: Linus Torvalds

mm/swapfile.c: unify normal/huge code path in put_swap_page()

In this patch, the normal/huge code path in put_swap_page() and several
helper functions are unified to avoid duplicated code, bugs, etc.  and
make it easier to review the code.

The removed lines are more than added lines.  And the binary size is
kept exactly same when CONFIG_TRANSPARENT_HUGEPAGE=n.

Link: http://lkml.kernel.org/r/20180720071845.17920-6-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Shaohua Li <shli@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 33ee011e
...@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct swap_info_struct *si, ...@@ -204,8 +204,16 @@ static void discard_swap_cluster(struct swap_info_struct *si,
#ifdef CONFIG_THP_SWAP #ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER HPAGE_PMD_NR #define SWAPFILE_CLUSTER HPAGE_PMD_NR
#define swap_entry_size(size) (size)
#else #else
#define SWAPFILE_CLUSTER 256 #define SWAPFILE_CLUSTER 256
/*
* Define swap_entry_size() as constant to let compiler to optimize
* out some code if !CONFIG_THP_SWAP
*/
#define swap_entry_size(size) 1
#endif #endif
#define LATENCY_LIMIT 256 #define LATENCY_LIMIT 256
...@@ -1192,18 +1200,7 @@ void swap_free(swp_entry_t entry) ...@@ -1192,18 +1200,7 @@ void swap_free(swp_entry_t entry)
/* /*
* Called after dropping swapcache to decrease refcnt to swap entries. * Called after dropping swapcache to decrease refcnt to swap entries.
*/ */
static void swapcache_free(swp_entry_t entry) void put_swap_page(struct page *page, swp_entry_t entry)
{
struct swap_info_struct *p;
p = _swap_info_get(entry);
if (p) {
if (!__swap_entry_free(p, entry, SWAP_HAS_CACHE))
free_swap_slot(entry);
}
}
static void swapcache_free_cluster(swp_entry_t entry)
{ {
unsigned long offset = swp_offset(entry); unsigned long offset = swp_offset(entry);
unsigned long idx = offset / SWAPFILE_CLUSTER; unsigned long idx = offset / SWAPFILE_CLUSTER;
...@@ -1212,39 +1209,41 @@ static void swapcache_free_cluster(swp_entry_t entry) ...@@ -1212,39 +1209,41 @@ static void swapcache_free_cluster(swp_entry_t entry)
unsigned char *map; unsigned char *map;
unsigned int i, free_entries = 0; unsigned int i, free_entries = 0;
unsigned char val; unsigned char val;
int size = swap_entry_size(hpage_nr_pages(page));
if (!IS_ENABLED(CONFIG_THP_SWAP))
return;
si = _swap_info_get(entry); si = _swap_info_get(entry);
if (!si) if (!si)
return; return;
ci = lock_cluster(si, offset); if (size == SWAPFILE_CLUSTER) {
VM_BUG_ON(!cluster_is_huge(ci));
map = si->swap_map + offset;
for (i = 0; i < SWAPFILE_CLUSTER; i++) {
val = map[i];
VM_BUG_ON(!(val & SWAP_HAS_CACHE));
if (val == SWAP_HAS_CACHE)
free_entries++;
}
if (!free_entries) {
for (i = 0; i < SWAPFILE_CLUSTER; i++)
map[i] &= ~SWAP_HAS_CACHE;
}
cluster_clear_huge(ci);
unlock_cluster(ci);
if (free_entries == SWAPFILE_CLUSTER) {
spin_lock(&si->lock);
ci = lock_cluster(si, offset); ci = lock_cluster(si, offset);
memset(map, 0, SWAPFILE_CLUSTER); VM_BUG_ON(!cluster_is_huge(ci));
map = si->swap_map + offset;
for (i = 0; i < SWAPFILE_CLUSTER; i++) {
val = map[i];
VM_BUG_ON(!(val & SWAP_HAS_CACHE));
if (val == SWAP_HAS_CACHE)
free_entries++;
}
if (!free_entries) {
for (i = 0; i < SWAPFILE_CLUSTER; i++)
map[i] &= ~SWAP_HAS_CACHE;
}
cluster_clear_huge(ci);
unlock_cluster(ci); unlock_cluster(ci);
mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER); if (free_entries == SWAPFILE_CLUSTER) {
swap_free_cluster(si, idx); spin_lock(&si->lock);
spin_unlock(&si->lock); ci = lock_cluster(si, offset);
} else if (free_entries) { memset(map, 0, SWAPFILE_CLUSTER);
for (i = 0; i < SWAPFILE_CLUSTER; i++, entry.val++) { unlock_cluster(ci);
mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
swap_free_cluster(si, idx);
spin_unlock(&si->lock);
return;
}
}
if (size == 1 || free_entries) {
for (i = 0; i < size; i++, entry.val++) {
if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE)) if (!__swap_entry_free(si, entry, SWAP_HAS_CACHE))
free_swap_slot(entry); free_swap_slot(entry);
} }
...@@ -1268,14 +1267,6 @@ int split_swap_cluster(swp_entry_t entry) ...@@ -1268,14 +1267,6 @@ int split_swap_cluster(swp_entry_t entry)
} }
#endif #endif
void put_swap_page(struct page *page, swp_entry_t entry)
{
if (!PageTransHuge(page))
swapcache_free(entry);
else
swapcache_free_cluster(entry);
}
static int swp_entry_cmp(const void *ent1, const void *ent2) static int swp_entry_cmp(const void *ent1, const void *ent2)
{ {
const swp_entry_t *e1 = ent1, *e2 = ent2; const swp_entry_t *e1 = ent1, *e2 = ent2;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册