/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function allows the mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);

void mmu_notifier_synchronize(void)
{
	/* Wait for any running method to finish. */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
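
/*
 * Example (an illustrative sketch, not part of the original file): a
 * driver whose ->release handler must not block can use
 * mmu_notifier_call_srcu() to defer freeing its notifier until all SRCU
 * readers are done. The "my_notifier" names below are hypothetical:
 *
 *	struct my_notifier {
 *		struct mmu_notifier mn;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void my_notifier_free(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct my_notifier, rcu));
 *	}
 *
 *	static void my_notifier_release(struct mmu_notifier *mn,
 *					struct mm_struct *mm)
 *	{
 *		struct my_notifier *p;
 *
 *		p = container_of(mn, struct my_notifier, mn);
 *		mmu_notifier_call_srcu(&p->rcu, my_notifier_free);
 *	}
 */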

/*
 * This function can't run concurrently with mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
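
/*
 * Example (an illustrative sketch): on a secondary MMU without a
 * hardware accessed bit, ->clear_flush_young can follow the comment
 * above: zap the range and report whether a mapping existed, so the
 * next access faults in and re-ages the page. The "my_mmu" names and
 * the my_mmu_zap_range() helper are hypothetical:
 *
 *	static int my_mmu_clear_flush_young(struct mmu_notifier *mn,
 *					    struct mm_struct *mm,
 *					    unsigned long start,
 *					    unsigned long end)
 *	{
 *		struct my_mmu *p = container_of(mn, struct my_mmu, mn);
 *
 *		return my_mmu_zap_range(p, start, end) ? 1 : 0;
 *	}
 */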

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too, to avoid the need for the
		 * subsystem to register an invalidate_range_end callback
		 * when it already registers invalidate_range. Usually a
		 * subsystem registers either invalidate_range_start()/end()
		 * or invalidate_range(), so this adds no overhead beyond
		 * the pointer check.
		 */
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
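
/*
 * Example (an illustrative sketch): a subsystem that shares the CPU
 * page tables and only needs its own TLB flushed (an IOMMU-style user)
 * can register invalidate_range() alone; as the comment in
 * __mmu_notifier_invalidate_range_end() notes, it is then called from
 * the end-side path without an invalidate_range_end() callback. The
 * ops table below is hypothetical:
 *
 *	static const struct mmu_notifier_ops my_iommu_ops = {
 *		.invalidate_range	= my_iommu_flush_iotlb,
 *	};
 */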

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must be always called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
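
/*
 * Example (an illustrative sketch) of the pinning rules above for an mm
 * other than current->mm; "task" and "p" are hypothetical:
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (!mm)
 *		return -ESRCH;
 *	ret = mmu_notifier_register(&p->mn, mm);
 *	mmput(mm);
 *
 * On success mmu_notifier_register() took an mm_count pin, so the
 * mm_users pin from get_task_mm() can be dropped immediately.
 */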

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
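
/*
 * Example (an illustrative sketch): once mmu_notifier_unregister()
 * returns, no notifier method can still be running, so the structure
 * embedding the notifier may be freed directly ("p" is hypothetical):
 *
 *	mmu_notifier_unregister(&p->mn, mm);
 *	kfree(p);
 */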

/*
 * Same as mmu_notifier_unregister, but with no callback and no SRCU
 * synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
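
/*
 * Example (an illustrative sketch): a caller of the _no_release variant
 * cannot free its notifier immediately, since methods may still be
 * running; one option is to pair it with mmu_notifier_call_srcu()
 * ("p" and my_notifier_free() are hypothetical):
 *
 *	mmu_notifier_unregister_no_release(&p->mn, mm);
 *	mmu_notifier_call_srcu(&p->rcu, my_notifier_free);
 */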

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}
subsys_initcall(mmu_notifier_init);