/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call to
 * a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);
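
/*
 * Example (illustrative sketch, hypothetical driver): a driver cannot free
 * its notifier state directly from ->release, because other notifier
 * methods may still be running under SRCU. It can instead defer the free
 * with mmu_notifier_call_srcu() until the SRCU grace period has elapsed.
 * "struct my_notifier" and the two helpers below are hypothetical.
 */
struct my_notifier {
	struct mmu_notifier mn;
	struct rcu_head rcu;
	/* driver-private per-mm state would live here */
};

static void my_notifier_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_notifier, rcu));
}

static void my_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct my_notifier *p = container_of(mn, struct my_notifier, mn);

	/* Deferred free: runs only after outstanding SRCU readers finish. */
	mmu_notifier_call_srcu(&p->rcu, my_notifier_free);
}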

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
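
/*
 * Example (illustrative sketch): per the comment above, a secondary MMU
 * with no hardware "accessed" bit can implement ->clear_flush_young by
 * tearing down its mappings for the range and reporting whether any
 * existed. my_unmap_range() is a hypothetical driver helper returning the
 * number of secondary-MMU mappings it removed in [start, end); struct
 * my_notifier is the hypothetical wrapper from the sketch further above.
 */
static int my_clear_flush_young(struct mmu_notifier *mn, struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct my_notifier *p = container_of(mn, struct my_notifier, mn);

	/* Report "young" iff a mapping existed (and was just unmapped). */
	return my_unmap_range(p, start, end) ? 1 : 0;
}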

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  bool blockable)
{
	struct mmu_notifier_range _range, *range = &_range;
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	range->blockable = blockable;
	range->start = start;
	range->end = end;
	range->mm = mm;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start) {
			int _ret = mn->ops->invalidate_range_start(mn, range);
			if (_ret) {
				pr_info("%pS callback failed with %d in %sblockable context.\n",
						mn->ops->invalidate_range_start, _ret,
						!blockable ? "non-" : "");
				ret = _ret;
			}
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end,
					 bool only_end)
{
	struct mmu_notifier_range _range, *range = &_range;
	struct mmu_notifier *mn;
	int id;

	/*
	 * The end callback will never be called if the start callback
	 * refused to go through because blockable was false, so assume
	 * here that we can block.
	 */
	range->blockable = true;
	range->start = start;
	range->end = end;
	range->mm = mm;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range here too to avoid the subsystem
		 * having to register an invalidate_range_end call-back when
		 * it already has invalidate_range. Usually a subsystem
		 * registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this is no additional overhead
		 * (besides the pointer check).
		 *
		 * We skip the call to invalidate_range() when we know it is
		 * safe, i.e. the call site used
		 * mmu_notifier_invalidate_range_only_end(), which is only
		 * done when a call to invalidate_range() already happened
		 * under the page table lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, range);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
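
/*
 * Example (illustrative sketch): as the comment in the loop above notes, a
 * subsystem that only needs the range-based flush can register
 * invalidate_range() alone; __mmu_notifier_invalidate_range_end() calls it
 * on its behalf, so no separate invalidate_range_end() callback is needed.
 * The "my_*" names below are hypothetical.
 */
static void my_invalidate_range(struct mmu_notifier *mn, struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	/* A real driver would flush its secondary TLB for [start, end) here. */
}

static const struct mmu_notifier_ops my_range_only_ops = {
	.invalidate_range	= my_invalidate_range,
};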

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
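
/*
 * Example (illustrative sketch): a typical caller pins mm_users with
 * get_task_mm() (or simply uses current->mm), registers, and then drops
 * the mm_users pin again; the notifier itself keeps an mm_count pin until
 * mmu_notifier_unregister(). my_notifier_attach() and my_notifier_ops are
 * hypothetical; struct my_notifier is the wrapper sketched further above.
 */
static int my_notifier_attach(struct task_struct *task, struct my_notifier *p)
{
	struct mm_struct *mm;
	int ret;

	mm = get_task_mm(task);
	if (!mm)
		return -ESRCH;

	p->mn.ops = &my_notifier_ops;
	ret = mmu_notifier_register(&p->mn, mm);

	/* The mm_users pin is only needed across registration itself. */
	mmput(mm);
	return ret;
}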

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
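
/*
 * Example (illustrative sketch): teardown for the hypothetical driver
 * sketched above. Once mmu_notifier_unregister() returns, no notifier
 * method (including ->release) can still be running on this notifier.
 */
static void my_notifier_detach(struct my_notifier *p, struct mm_struct *mm)
{
	mmu_notifier_unregister(&p->mn, mm);
	/*
	 * Safe to free now, assuming ->release did not already arrange to
	 * free the structure itself (as in the call_srcu sketch above).
	 */
	kfree(p);
}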

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);