/*
 * mm/thrash.c
 *
 * Copyright (C) 2004, Red Hat, Inc.
 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
 * Released under the GPL, see the file COPYING for details.
 *
 * Simple token based thrashing protection, using the algorithm
 * described in:  http://www.cs.wm.edu/~sjiang/token.pdf
 *
 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
 * Improved algorithm to pass the token:
 * Each task has a priority which is incremented if it contended
 * for the token in an interval less than its previous attempt.
 * If the token is acquired, that task's priority is boosted to prevent
 * the token from bouncing around too often and to let the task make
 * some progress in its execution.
 */

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/memcontrol.h>

#include <trace/events/vmscan.h>

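/* Age (halve) the token holder's priority after this many global faults. */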
#define TOKEN_AGING_INTERVAL	(0xFF)

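/*
 * swap_token_mm is the mm_struct that currently holds the swap token;
 * swap_token_memcg caches its memory cgroup so memcg reclaim can tell
 * whether the holder is relevant to it.  global_faults is a logical
 * clock that ticks on every grab_swap_token() call, and last_aging
 * records the tick at which the holder's priority was last halved.
 * Updates are serialized by swap_token_lock; the clock tick itself is
 * done outside the lock, which is tolerable because the token is only
 * a heuristic hint to reclaim.
 */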
static DEFINE_SPINLOCK(swap_token_lock);
struct mm_struct *swap_token_mm;
struct mem_cgroup *swap_token_memcg;
static unsigned int global_faults;
static unsigned int last_aging;

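/*
 * Resolve the memory cgroup an mm belongs to.  The reference taken by
 * try_get_mem_cgroup_from_mm() is dropped again straight away: the
 * returned pointer is stored in swap_token_memcg purely as an
 * identifier for later comparison and is never dereferenced, so no
 * long-lived reference is needed.
 */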
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	memcg = try_get_mem_cgroup_from_mm(mm);
	if (memcg)
		css_put(mem_cgroup_css(memcg));

	return memcg;
}
#else
static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
{
	return NULL;
}
#endif

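/*
 * Called when a task takes a swap-in fault.  Each call ticks the
 * global_faults clock, re-evaluates the caller's token_priority
 * according to how quickly it has come back for the token, and hands
 * the token over if the caller now outranks the current holder.  The
 * trylock means a contended call simply skips the update; losing the
 * odd round is harmless since the token is only a hint.
 */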
void grab_swap_token(struct mm_struct *mm)
{
	int current_interval;
	unsigned int old_prio = mm->token_priority;

	global_faults++;

	current_interval = global_faults - mm->faultstamp;

	if (!spin_trylock(&swap_token_lock))
		return;

	/* First come first served */
	if (!swap_token_mm)
		goto replace_token;

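	/*
	 * Priority aging: long fault intervals normally decay a task's
	 * priority on their own, but a token holder that is sleeping
	 * generates no faults at all and would otherwise keep the token
	 * forever.  Halve the holder's priority every
	 * TOKEN_AGING_INTERVAL global faults so a dormant holder can
	 * eventually be displaced.
	 */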
	if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
		swap_token_mm->token_priority /= 2;
		last_aging = global_faults;
	}

	if (mm == swap_token_mm) {
		mm->token_priority += 2;
		goto update_priority;
	}

	if (current_interval < mm->last_interval)
		mm->token_priority++;
	else {
		if (likely(mm->token_priority > 0))
			mm->token_priority--;
	}

	/* Check if we deserve the token */
	if (mm->token_priority > swap_token_mm->token_priority)
		goto replace_token;

update_priority:
	trace_update_swap_token_priority(mm, old_prio, swap_token_mm);

out:
	mm->faultstamp = global_faults;
	mm->last_interval = current_interval;
	spin_unlock(&swap_token_lock);
	return;

replace_token:
	mm->token_priority += 2;
	trace_replace_swap_token(swap_token_mm, mm);
	swap_token_mm = mm;
	swap_token_memcg = swap_token_memcg_from_mm(mm);
	last_aging = global_faults;
	goto out;
}

/* Called on process exit. */
void __put_swap_token(struct mm_struct *mm)
{
	spin_lock(&swap_token_lock);
	if (likely(mm == swap_token_mm)) {
		trace_put_swap_token(swap_token_mm);
		swap_token_mm = NULL;
		swap_token_memcg = NULL;
	}
	spin_unlock(&swap_token_lock);
}

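/*
 * A NULL memcg on either side means "match anything": global reclaim
 * passes no cgroup and may disable whatever token exists, and a token
 * whose cgroup could not be resolved matches every reclaimer.  Only
 * two distinct, non-NULL cgroups count as unrelated.
 */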
static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
{
	if (!a)
		return true;
	if (!b)
		return true;
	if (a == b)
		return true;
	return false;
}

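/*
 * Called from the reclaim path when scanning is under enough pressure
 * that the token holder's pages should no longer stay exempt.  The
 * unlocked pre-check keeps the common case (no token, or a token owned
 * by an unrelated memcg) free of lock traffic; the test is repeated
 * under the lock before the token is actually dropped.
 */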
void disable_swap_token(struct mem_cgroup *memcg)
{
	/* memcg reclaim must not disable an unrelated mm's token. */
	if (match_memcg(memcg, swap_token_memcg)) {
		spin_lock(&swap_token_lock);
		if (match_memcg(memcg, swap_token_memcg)) {
			trace_disable_swap_token(swap_token_mm);
			swap_token_mm = NULL;
			swap_token_memcg = NULL;
		}
		spin_unlock(&swap_token_lock);
	}
}
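
/*
 * Illustrative sketch, not part of mm/thrash.c: roughly how callers
 * are expected to exercise the token.  Wrappers like these are assumed
 * to live in include/linux/swap.h next to the externs for this file;
 * the example_* names and call sites below are hypothetical and shown
 * only to make the intended use concrete.
 */
#if 0	/* example only, never compiled */
#include <linux/mm_types.h>
#include <linux/swap.h>

/* Lockless hint check: is this mm the current token holder? */
static inline int example_has_swap_token(struct mm_struct *mm)
{
	return mm == swap_token_mm;
}

/* On a swap-in fault, contend for (or re-prioritize) the token. */
static void example_swapin_fault(struct mm_struct *mm)
{
	grab_swap_token(mm);
}

/* On mm teardown, give the token back if we hold it. */
static void example_mm_exit(struct mm_struct *mm)
{
	if (example_has_swap_token(mm))
		__put_swap_token(mm);
}
#endif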