/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <cl@linux.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
DEFINE_STATIC_SRCU(srcu);

/*
 * This function allows the mmu_notifier::release callback to delay a call
 * to a function that will free appropriate resources. The function must be
 * quick and must not block.
 */
void mmu_notifier_call_srcu(struct rcu_head *rcu,
			    void (*func)(struct rcu_head *rcu))
{
	call_srcu(&srcu, rcu, func);
}
EXPORT_SYMBOL_GPL(mmu_notifier_call_srcu);

void mmu_notifier_synchronize(void)
{
	/* Wait for any running method to finish. */
	srcu_barrier(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
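
/*
 * Illustrative sketch (not part of the original file): one way a driver
 * might use the two helpers above. Because ->release must not block, the
 * driver defers freeing its notifier state through mmu_notifier_call_srcu()
 * instead of freeing it directly. The names example_notifier,
 * example_free_notifier and example_release are hypothetical:
 *
 *	struct example_notifier {
 *		struct mmu_notifier mn;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void example_free_notifier(struct rcu_head *rcu)
 *	{
 *		kfree(container_of(rcu, struct example_notifier, rcu));
 *	}
 *
 *	static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		struct example_notifier *e;
 *
 *		e = container_of(mn, struct example_notifier, mn);
 *		mmu_notifier_call_srcu(&e->rcu, example_free_notifier);
 *	}
 *
 * A teardown path that needs to be sure the deferred free has run could
 * then call mmu_notifier_synchronize() to wait for it.
 */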

/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it must be
		 * handled, as it's the only way for the driver to flush all
		 * existing sptes and stop the driver from establishing any more
		 * sptes before all the pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister so
		 * mmu_notifier_unregister will do nothing other than to wait
		 * for ->release to finish and for mmu_notifier_unregister to
		 * return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);
	srcu_read_unlock(&srcu, id);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from returning to
	 * exit_mmap (which would proceed with freeing all pages in the mm)
	 * until the ->release method returns, if it was invoked by
	 * mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one mm_count
	 * is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long start,
					unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_clear_young(struct mm_struct *mm,
			       unsigned long start,
			       unsigned long end)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_young)
			young |= mn->ops->clear_young(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_start);

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end,
					 bool only_end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		/*
		 * Call invalidate_range() here too, so that a subsystem which
		 * already registers invalidate_range() does not also have to
		 * register an invalidate_range_end() callback. Usually a
		 * subsystem registers either invalidate_range_start()/end() or
		 * invalidate_range(), so this adds no overhead beyond the
		 * pointer check.
		 *
		 * The call to invalidate_range() is skipped when it is known
		 * to be safe, i.e. when the call site used
		 * mmu_notifier_invalidate_range_only_end() because
		 * invalidate_range() was already called under the page table
		 * lock.
		 */
		if (!only_end && mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
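
/*
 * Illustrative sketch (not part of the original file): core mm code reaches
 * the two functions above through the mmu_notifier_invalidate_range_start()/
 * mmu_notifier_invalidate_range_end() wrappers, always as a balanced pair
 * around the actual page table update. The unmap helper named below is
 * hypothetical:
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	example_unmap_pages(mm, start, end);	// clear and flush the primary PTEs
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 *
 * A call site that already issued mmu_notifier_invalidate_range() under the
 * page table lock can use mmu_notifier_invalidate_range_only_end() instead,
 * which passes only_end == true and skips the extra invalidate_range() call
 * seen above.
 */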

void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range)
			mn->ops->invalidate_range(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);

/*
 * Must be called while holding mm->mmap_sem for either read or write.
 * The result is guaranteed to be valid until mm->mmap_sem is dropped.
 */
bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	int id;
	bool ret = false;

	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));

	if (!mm_has_notifiers(mm))
		return ret;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (!mn->ops->invalidate_range &&
		    !mn->ops->invalidate_range_start &&
		    !mn->ops->invalidate_range_end)
				continue;

		if (!(mn->ops->flags & MMU_INVALIDATE_DOES_NOT_BLOCK)) {
			ret = true;
			break;
		}
	}
	srcu_read_unlock(&srcu, id);
	return ret;
}
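
/*
 * Illustrative sketch (not part of the original file): for the check above
 * to report "no blockable notifiers", every registered notifier that has
 * invalidate callbacks must advertise MMU_INVALIDATE_DOES_NOT_BLOCK in its
 * ops flags. The ops structure and callback named below are hypothetical:
 *
 *	static const struct mmu_notifier_ops example_nonblocking_ops = {
 *		.flags			= MMU_INVALIDATE_DOES_NOT_BLOCK,
 *		.invalidate_range	= example_invalidate_range,
 *	};
 */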

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	mmgrab(mm);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
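
/*
 * Illustrative sketch (not part of the original file): the usual caller
 * embeds a struct mmu_notifier in its own per-mm state and registers it
 * against current->mm, releasing any temporary mm_users pin afterwards.
 * example_state, example_ops and example_attach are hypothetical:
 *
 *	struct example_state {
 *		struct mmu_notifier mn;
 *		// driver-private data tracking the secondary TLB
 *	};
 *
 *	static int example_attach(struct example_state *state)
 *	{
 *		state->mn.ops = &example_ops;
 *		return mmu_notifier_register(&state->mn, current->mm);
 *	}
 *
 * The notifier stays registered until mmu_notifier_unregister() (or the
 * no-callback variant mmu_notifier_unregister_no_release()) is called; only
 * then may the embedding structure be freed.
 */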

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release to
		 * finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to guarantee
		 * that ->release is called before freeing the pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		/*
		 * Can not use list_del_rcu() since __mmu_notifier_release
		 * can delete it before we hold the lock.
		 */
		hlist_del_init_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);

/*
 * Same as mmu_notifier_unregister but no callback and no srcu synchronization.
 */
void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	spin_lock(&mm->mmu_notifier_mm->lock);
	/*
	 * Can not use list_del_rcu() since __mmu_notifier_release
	 * can delete it before we hold the lock.
	 */
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);
	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);