/* Support for MMIO probes.
 * Borrows much code from kprobes
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	bool old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU).
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

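/*
 * Per-CPU state for a probe hit that is currently being single-stepped.
 * Set up in kmmio_handler() and consumed in post_kmmio_handler().
 */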
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * This is basically a dynamic stabbing problem: find the probe whose
 * interval overlaps a given address. Could use the existing prio tree
 * code, or possibly better alternatives:
 * "The Interval Skip List: A Data Structure for Finding All Intervals
 * That Overlap a Point" (might be simple)
 * "Space Efficient Dynamic Stabbing with Fast Queries" - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr < (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *p;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(p, head, list) {
		if (p->page == page)
			return p;
	}
	return NULL;
}

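/*
 * Set or clear the _PAGE_PRESENT bit for the page that maps @addr,
 * handling both 4k and 2M pages. The previous presence state is
 * returned in @old. Returns 0 on success, or -1 if the address has no
 * pte or maps an unexpected page level.
 */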
static int set_page_presence(unsigned long addr, bool present, bool *old)
{
	pteval_t pteval;
	pmdval_t pmdval;
	unsigned int level;
	pmd_t *pmd;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte) {
		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		pmd = (pmd_t *)pte;
		pmdval = pmd_val(*pmd);
		*old = !!(pmdval & _PAGE_PRESENT);
		pmdval &= ~_PAGE_PRESENT;
		if (present)
			pmdval |= _PAGE_PRESENT;
		set_pmd(pmd, __pmd(pmdval));
		break;

	case PG_LEVEL_4K:
		pteval = pte_val(*pte);
		*old = !!(pteval & _PAGE_PRESENT);
		pteval &= ~_PAGE_PRESENT;
		if (present)
			pteval |= _PAGE_PRESENT;
		set_pte_atomic(pte, __pte(pteval));
		break;

	default:
		pr_err("kmmio: unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(addr);

	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee,
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming on the other hand is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
	if (f->armed) {
		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
					f->page, f->count, f->old_presence);
	}
	ret = set_page_presence(f->page, false, &f->old_presence);
	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
	f->armed = true;
	return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	bool tmp;
	int ret = set_page_presence(f->page, f->old_presence, &tmp);
	WARN_ONCE(ret < 0,
			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing especially
 * within a kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault (trap 14) comes
 * through an interrupt gate, and they remain disabled throughout this
 * function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		disarm_kmmio_fault_page(faultpage);
		if (addr == ctx->addr) {
			/*
			 * On SMP we sometimes get recursive probe hits on the
			 * same address. Context is already saved, fall out.
			 */
			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
						"address 0x%08lx.\n",
						smp_processor_id(), addr);
			ret = 1;
			goto no_kmmio_ctx;
		}
		/*
		 * Prevent overwriting already in-flight context.
		 * This should not happen, let's hope disarming at least
		 * prevents a panic.
		 */
		pr_emerg("kmmio: recursive probe hit on CPU %d, "
					"for address 0x%08lx. Ignoring.\n",
					smp_processor_id(), addr);
		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
					ctx->addr);
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now set the present bit in the PTE and single-step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap1 (the debug trap) is an
 * interrupt gate, and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
							smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	arm_kmmio_fault_page(ctx->fpage);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case continue the remaining processing
	 * of do_debug, as if this were not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. Such events can result only from
 * programming mistakes that access addresses before the beginning or
 * past the end of a mapping.
 */
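/*
 * As an illustration (hypothetical values): a probe with p->addr =
 * 0xd0000f00 and p->len = 0x200 gives size_lim = 0xf00 + 0x200 = 0x1100
 * in register_kmmio_probe() below, so two whole pages (at 0xd0000000
 * and 0xd0001000) get armed for a 0x200-byte region of interest.
 */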
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
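/*
 * A minimal sketch of how a client (e.g. mmiotrace) might register a
 * probe. The handler signatures follow the pre_handler/post_handler
 * calls made from kmmio_handler() and post_kmmio_handler() above; the
 * names my_pre, my_post, my_probe and the use of PAGE_SIZE for the
 * length are hypothetical.
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *						unsigned long addr)
 *	{
 *		pr_info("kmmio: hit at 0x%08lx\n", addr);
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *						struct pt_regs *regs)
 *	{
 *		pr_info("kmmio: access done\n");
 *	}
 *
 *	static struct kmmio_probe my_probe = {
 *		.addr		= (unsigned long)ioremapped_vaddr,
 *		.len		= PAGE_SIZE,
 *		.pre_handler	= my_pre,
 *		.post_handler	= my_post,
 *	};
 *
 *	if (register_kmmio_probe(&my_probe))
 *		pr_err("my_probe registration failed\n");
 */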

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	while (p) {
		struct kmmio_fault_page *next = p->release_next;
		BUG_ON(p->count);
		kfree(p);
		p = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;
	spin_lock_irqsave(&kmmio_lock, flags);
	while (p) {
		if (!p->count)
			list_del_rcu(&p->list);
		else
			*prevp = p->release_next;
		prevp = &p->release_next;
		p = p->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, deferred via RCU.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, kmmio page fault handler might
	 * not find the respective kmmio_fault_page and determine it's not
	 * a kmmio fault, when it actually is. This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
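/*
 * Matching teardown sketch for the registration example above. As the
 * comment before unregister_kmmio_probe() explains, an RCU grace
 * period must pass before the struct kmmio_probe may be released:
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();
 *	(only now may my_probe be freed or reused)
 */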

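/*
 * Called on every die notification; route single-step traps (DR_STEP
 * set in the debug status register) to post_kmmio_handler() and stop
 * the notifier chain if the step was consumed.
 */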
static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
								void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

static int __init init_kmmio(void)
{
	int i;
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);
	return register_die_notifier(&nb_die);
}
fs_initcall(init_kmmio); /* should be before device_initcall() */