/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
	srcu_read_unlock(&srcu, id);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
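
/*
 * Illustration only, not part of this file: a driver whose secondary MMU
 * does track an accessed bit on its sptes could implement the callback
 * invoked above roughly as follows (the my_* names are hypothetical):
 *
 *	static int my_clear_flush_young(struct mmu_notifier *mn,
 *					struct mm_struct *mm,
 *					unsigned long address)
 *	{
 *		int young;
 *
 *		young = my_spte_test_and_clear_accessed(mn, address);
 *		if (young)
 *			my_flush_secondary_tlb(mn, address);
 *		return young;
 *	}
 */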

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
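
/*
 * For context, a sketch of the caller side in the core VM: the
 * mmu_notifier_invalidate_range_{start,end} wrappers in
 * <linux/mmu_notifier.h> check mm_has_notifiers() and, only if a
 * notifier is registered, call the two functions above around the
 * primary MMU update:
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	... clear the page table entries and flush the primary TLB ...
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 */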

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
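
/*
 * A minimal registration sketch, assuming a hypothetical driver that
 * embeds a struct mmu_notifier in its per-mm state (the my_* names are
 * illustrative, not part of this file):
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 *
 *	my_state->mn.ops = &my_ops;
 *	ret = mmu_notifier_register(&my_state->mn, current->mm);
 */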

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we
 * guaranteed that ->release or any other method can no longer run.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Cannot use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
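
/*
 * Completing the hypothetical sketch above: once mmu_notifier_unregister
 * has returned, no notifier method (including ->release) can still be
 * running, so the driver may free the structure embedding the notifier:
 *
 *	mmu_notifier_unregister(&my_state->mn, mm);
 *	kfree(my_state);	(safe: no callback can run anymore)
 */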

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}

module_init(mmu_notifier_init);