/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

22
/* global SRCU for all MMs */
23
static struct srcu_struct srcu;
24

A
Andrea Arcangeli 已提交
25 26 27 28 29 30 31
/*
 * This function can't run concurrently against mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
32 33
 * the mmu_notifier_mm->lock in addition to SRCU and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
A
Andrea Arcangeli 已提交
34 35 36 37 38 39
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
40
	int id;
41 42

	/*
43 44 45 46
	 * srcu_read_lock() here will block synchronize_srcu() in
	 * mmu_notifier_unregister() until all registered
	 * ->release() callouts this function makes have
	 * returned.
47
	 */
48
	id = srcu_read_lock(&srcu);
A
Andrea Arcangeli 已提交
49 50 51 52 53
	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
54

A
Andrea Arcangeli 已提交
55
		/*
56 57
		 * Unlink.  This will prevent mmu_notifier_unregister()
		 * from also making the ->release() callout.
A
Andrea Arcangeli 已提交
58 59
		 */
		hlist_del_init_rcu(&mn->hlist);
60 61 62 63 64 65 66 67 68
		spin_unlock(&mm->mmu_notifier_mm->lock);

		/*
		 * Clear sptes. (see 'release' description in mmu_notifier.h)
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);

		spin_lock(&mm->mmu_notifier_mm->lock);
A
Andrea Arcangeli 已提交
69 70 71 72
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
73 74 75 76 77 78 79 80 81 82 83 84
	 * All callouts to ->release() which we have done are complete.
	 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete
	 */
	srcu_read_unlock(&srcu, id);

	/*
	 * mmu_notifier_unregister() may have unlinked a notifier and may
	 * still be calling out to it.	Additionally, other notifiers
	 * may have been active via vmtruncate() et. al. Block here
	 * to ensure that all notifier callouts for this mm have been
	 * completed and the sptes are really cleaned up before returning
	 * to exit_mmap().
A
Andrea Arcangeli 已提交
85
	 */
86
	synchronize_srcu(&srcu);
A
Andrea Arcangeli 已提交
87 88 89 90 91 92 93 94 95 96 97 98
}

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending if the mapping previously
 * existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
99
	int young = 0, id;
A
Andrea Arcangeli 已提交
100

101
	id = srcu_read_lock(&srcu);
A
Andrea Arcangeli 已提交
102 103 104 105
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
106
	srcu_read_unlock(&srcu, id);
A
Andrea Arcangeli 已提交
107 108 109 110

	return young;
}

A
Andrea Arcangeli 已提交
111 112 113 114 115
int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
116
	int young = 0, id;
A
Andrea Arcangeli 已提交
117

118
	id = srcu_read_lock(&srcu);
A
Andrea Arcangeli 已提交
119 120 121 122 123 124 125
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
126
	srcu_read_unlock(&srcu, id);
A
Andrea Arcangeli 已提交
127 128 129 130

	return young;
}

131 132 133 134 135
void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
136
	int id;
137

138
	id = srcu_read_lock(&srcu);
139 140 141 142
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
143
	srcu_read_unlock(&srcu, id);
144 145
}

A
Andrea Arcangeli 已提交
146 147 148 149 150
void __mmu_notifier_invalidate_page(struct mm_struct *mm,
					  unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
151
	int id;
A
Andrea Arcangeli 已提交
152

153
	id = srcu_read_lock(&srcu);
A
Andrea Arcangeli 已提交
154 155 156 157
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
158
	srcu_read_unlock(&srcu, id);
A
Andrea Arcangeli 已提交
159 160 161 162 163 164 165
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
166
	int id;
A
Andrea Arcangeli 已提交
167

168
	id = srcu_read_lock(&srcu);
A
Andrea Arcangeli 已提交
169 170 171 172
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
173
	srcu_read_unlock(&srcu, id);
A
Andrea Arcangeli 已提交
174 175 176 177 178 179 180
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
181
	int id;
A
Andrea Arcangeli 已提交
182

183
	id = srcu_read_lock(&srcu);
A
Andrea Arcangeli 已提交
184 185 186 187
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
188
	srcu_read_unlock(&srcu, id);
A
Andrea Arcangeli 已提交
189 190 191 192 193 194 195 196 197 198 199
}

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

200
	/*
201 202 203
	 * Verify that mmu_notifier_init() already run and the global srcu is
	 * initialized.
	 */
204 205
	BUG_ON(!srcu.per_cpu_ref);

206 207 208 209 210
	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

A
Andrea Arcangeli 已提交
211 212 213 214
	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
215
		goto out_clean;
A
Andrea Arcangeli 已提交
216 217 218 219

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
220

A
Andrea Arcangeli 已提交
221
		mm->mmu_notifier_mm = mmu_notifier_mm;
222
		mmu_notifier_mm = NULL;
A
Andrea Arcangeli 已提交
223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
239
out_clean:
A
Andrea Arcangeli 已提交
240 241
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
242 243
	kfree(mmu_notifier_mm);
out:
A
Andrea Arcangeli 已提交
244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must be always called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
288 289
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
A
Andrea Arcangeli 已提交
290 291 292 293 294 295 296 297 298
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister returned we're guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

299
	spin_lock(&mm->mmu_notifier_mm->lock);
A
Andrea Arcangeli 已提交
300
	if (!hlist_unhashed(&mn->hlist)) {
301
		int id;
302

A
Andrea Arcangeli 已提交
303
		/*
304
		 * Ensure we synchronize up with __mmu_notifier_release().
A
Andrea Arcangeli 已提交
305
		 */
306 307 308 309 310
		id = srcu_read_lock(&srcu);

		hlist_del_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);

A
Andrea Arcangeli 已提交
311 312
		if (mn->ops->release)
			mn->ops->release(mn, mm);
313

314 315 316 317 318
		/*
		 * Allow __mmu_notifier_release() to complete.
		 */
		srcu_read_unlock(&srcu, id);
	} else
A
Andrea Arcangeli 已提交
319 320 321
		spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
322 323
	 * Wait for any running method to finish, including ->release() if it
	 * was run by __mmu_notifier_release() instead of us.
A
Andrea Arcangeli 已提交
324
	 */
325
	synchronize_srcu(&srcu);
A
Andrea Arcangeli 已提交
326 327 328 329 330 331

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
332 333 334 335 336 337 338

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}

module_init(mmu_notifier_init);