/*
 * mm/thrash.c
 *
 * Copyright (C) 2004, Red Hat, Inc.
 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
 * Released under the GPL, see the file COPYING for details.
 *
 * Simple token based thrashing protection, using the algorithm
 * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
 *
 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
 * Improved algorithm for passing the token:
 * Each task has a priority which is incremented if it contends for
 * the token within a shorter interval than on its previous attempt.
 * When a task acquires the token, its priority is boosted to prevent
 * the token from bouncing around too often and to let the task make
 * some progress in its execution.
 */

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/memcontrol.h>

#include <trace/events/vmscan.h>

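/*
 * Age (halve) the token holder's priority once this many global page
 * faults have passed since the last aging, so that a sleeping holder
 * cannot keep the token forever.
 */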
#define TOKEN_AGING_INTERVAL	(0xFF)

static DEFINE_SPINLOCK(swap_token_lock);
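/* The mm that currently holds the swap token, and the memcg it belongs to. */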
struct mm_struct *swap_token_mm;
struct mem_cgroup *swap_token_memcg;

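/*
 * Look up the memcg that @mm belongs to.  The reference taken by
 * try_get_mem_cgroup_from_mm() is dropped immediately; the result is
 * only used for pointer comparison against swap_token_memcg.
 */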
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	memcg = try_get_mem_cgroup_from_mm(mm);
	if (memcg)
		css_put(mem_cgroup_css(memcg));

	return memcg;
}
#else
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
	return NULL;
}
#endif

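/*
 * Called when a task faults on swapped-out memory.  Re-evaluate the
 * faulting mm's token priority and take the token over if it now
 * exceeds the priority of the current holder.
 */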
void grab_swap_token(struct mm_struct *mm)
{
	int current_interval;
	unsigned int old_prio = mm->token_priority;
	static unsigned int global_faults;
	static unsigned int last_aging;

	global_faults++;

	current_interval = global_faults - mm->faultstamp;

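	/* The token is only a heuristic; don't spin if the lock is busy. */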
	if (!spin_trylock(&swap_token_lock))
		return;

	/* First come first served */
	if (!swap_token_mm)
		goto replace_token;

	/*
	 * Usually we don't need priority aging, because long fault intervals
	 * make the priority drop quickly on their own. But there is one
	 * exception: if the token owner task is sleeping, it never produces
	 * long fault intervals, so we need an explicit aging mechanism. The
	 * requirements for priority aging are:
	 *  1) The aging interval must be reasonably long. Too short an aging
	 *     interval loses the swap token quickly and hurts performance.
	 *  2) The swap token owner task must be aged even while it sleeps.
	 */
	if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
		swap_token_mm->token_priority /= 2;
		last_aging = global_faults;
	}

	if (mm == swap_token_mm) {
		mm->token_priority += 2;
		goto update_priority;
	}

	if (current_interval < mm->last_interval)
		mm->token_priority++;
	else {
		if (likely(mm->token_priority > 0))
			mm->token_priority--;
	}

	/* Check if we deserve the token */
	if (mm->token_priority > swap_token_mm->token_priority)
		goto replace_token;

update_priority:
	trace_update_swap_token_priority(mm, old_prio, swap_token_mm);

out:
	mm->faultstamp = global_faults;
	mm->last_interval = current_interval;
	spin_unlock(&swap_token_lock);
	return;

replace_token:
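	/* Boost the new holder so the token doesn't bounce right back. */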
	mm->token_priority += 2;
	trace_replace_swap_token(swap_token_mm, mm);
	swap_token_mm = mm;
	swap_token_memcg = swap_token_memcg_from_mm(mm);
	last_aging = global_faults;
	goto out;
}

/* Called on process exit. */
void __put_swap_token(struct mm_struct *mm)
{
	spin_lock(&swap_token_lock);
	if (likely(mm == swap_token_mm)) {
		trace_put_swap_token(swap_token_mm);
		swap_token_mm = NULL;
		swap_token_memcg = NULL;
	}
	spin_unlock(&swap_token_lock);
}

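/*
 * A NULL mem_cgroup on either side matches anything; otherwise the two
 * cgroups must be identical.
 */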
static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
{
	if (!a)
		return true;
	if (!b)
		return true;
	if (a == b)
		return true;
	return false;
}

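/*
 * Drop the token on behalf of the reclaim code.  memcg reclaim only
 * drops the token when it matches the holder's cgroup (see
 * match_memcg()); global reclaim (NULL memcg) drops any token.
 */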
void disable_swap_token(struct mem_cgroup *memcg)
{
	/* memcg reclaim must not disable an unrelated mm's token. */
	if (match_memcg(memcg, swap_token_memcg)) {
		spin_lock(&swap_token_lock);
		if (match_memcg(memcg, swap_token_memcg)) {
			trace_disable_swap_token(swap_token_mm);
			swap_token_mm = NULL;
			swap_token_memcg = NULL;
		}
		spin_unlock(&swap_token_lock);
	}
}