/* Support for MMIO probes.
 * Benefits a lot from the kprobes code
 * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
 *     2007 Alexander Eichner
 *     2008 Pekka Paalanen <pq@iki.fi>
 */

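/*
 * How tracing works, in short (summarizing the code below): a registered
 * probe covers one or more whole pages, and each covered page is "armed" by
 * clearing its present bit. Any access to an armed page faults into
 * kmmio_handler(), which runs the probe's pre_handler, disarms the page and
 * sets TF so the faulting instruction is single-stepped. The debug trap that
 * follows lands in post_kmmio_handler(), which runs the post_handler,
 * re-arms the page and restores the saved flags.
 */
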
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/errno.h>
#include <asm/debugreg.h>
#include <linux/mmiotrace.h>

#define KMMIO_PAGE_HASH_BITS 4
#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)

struct kmmio_fault_page {
	struct list_head list;
	struct kmmio_fault_page *release_next;
	unsigned long page; /* location of the fault page */
	bool old_presence; /* page presence prior to arming */
	bool armed;

	/*
	 * Number of times this page has been registered as a part
	 * of a probe. If zero, page is disarmed and this may be freed.
	 * Used only by writers (RCU) and post_kmmio_handler().
	 * Protected by kmmio_lock, when linked into kmmio_page_table.
	 */
	int count;
};

struct kmmio_delayed_release {
	struct rcu_head rcu;
	struct kmmio_fault_page *release_list;
};

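/*
 * Per-CPU state carried from the page fault in kmmio_handler() to the
 * matching debug trap in post_kmmio_handler(): the fault page and probe
 * that were hit, the faulting address, and the saved TF/IF flags.
 */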
struct kmmio_context {
	struct kmmio_fault_page *fpage;
	struct kmmio_probe *probe;
	unsigned long saved_flags;
	unsigned long addr;
	int active;
};

static DEFINE_SPINLOCK(kmmio_lock);

/* Protected by kmmio_lock */
unsigned int kmmio_count;

/* Read-protected by RCU, write-protected by kmmio_lock. */
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

static struct list_head *kmmio_page_list(unsigned long page)
{
	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);

/*
 * this is basically a dynamic stabbing problem:
 * Could use the existing prio tree code or
 * Possible better implementations:
 * The Interval Skip List: A Data Structure for Finding All Intervals That
 * Overlap a Point (might be simple)
 * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
 */
/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
{
	struct kmmio_probe *p;
	list_for_each_entry_rcu(p, &kmmio_probes, list) {
		if (addr >= p->addr && addr <= (p->addr + p->len))
			return p;
	}
	return NULL;
}

/* You must be holding RCU read lock. */
static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
{
	struct list_head *head;
	struct kmmio_fault_page *p;

	page &= PAGE_MASK;
	head = kmmio_page_list(page);
	list_for_each_entry_rcu(p, head, list) {
		if (p->page == page)
			return p;
	}
	return NULL;
}

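/*
 * Helpers to clear or restore the _PAGE_PRESENT bit of a single mapping.
 * 2M (large page) mappings are modified at the pmd level, 4k mappings at
 * the pte level; set_page_presence() below picks the variant matching the
 * page level reported by lookup_address().
 */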
static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
{
	pmdval_t v = pmd_val(*pmd);
	*old = !!(v & _PAGE_PRESENT);
	v &= ~_PAGE_PRESENT;
	if (present)
		v |= _PAGE_PRESENT;
	set_pmd(pmd, __pmd(v));
}

static void set_pte_presence(pte_t *pte, bool present, bool *old)
{
	pteval_t v = pte_val(*pte);
	*old = !!(v & _PAGE_PRESENT);
	v &= ~_PAGE_PRESENT;
	if (present)
		v |= _PAGE_PRESENT;
	set_pte_atomic(pte, __pte(v));
}

static int set_page_presence(unsigned long addr, bool present, bool *old)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte) {
		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
		return -1;
	}

	switch (level) {
	case PG_LEVEL_2M:
		set_pmd_presence((pmd_t *)pte, present, old);
		break;
	case PG_LEVEL_4K:
		set_pte_presence(pte, present, old);
		break;
	default:
		pr_err("kmmio: unexpected page level 0x%x.\n", level);
		return -1;
	}

	__flush_tlb_one(addr);
	return 0;
}

/*
 * Mark the given page as not present. Access to it will trigger a fault.
 *
 * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
 * protection is ignored here. RCU read lock is assumed held, so the struct
 * will not disappear unexpectedly. Furthermore, the caller must guarantee
 * that double arming the same virtual address (page) cannot occur.
 *
 * Double disarming, on the other hand, is allowed, and may occur when a fault
 * and mmiotrace shutdown happen simultaneously.
 */
static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	int ret;
	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
	if (f->armed) {
		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
					f->page, f->count, f->old_presence);
	}
	ret = set_page_presence(f->page, false, &f->old_presence);
	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
	f->armed = true;
	return ret;
}

/** Restore the given page to saved presence state. */
static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
	bool tmp;
	int ret = set_page_presence(f->page, f->old_presence, &tmp);
	WARN_ONCE(ret < 0,
			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
	f->armed = false;
}

/*
 * This is being called from do_page_fault().
 *
 * We may be in an interrupt or a critical section. Also prefetching may
 * trigger a page fault. We may be in the middle of a process switch.
 * We cannot take any locks, because we could be executing within a
 * kmmio critical section.
 *
 * Local interrupts are disabled, so preemption cannot happen.
 * Do not enable interrupts, do not sleep, and watch out for other CPUs.
 */
/*
 * Interrupts are disabled on entry as the page fault is taken through an
 * interrupt gate and they remain disabled throughout this function.
 */
int kmmio_handler(struct pt_regs *regs, unsigned long addr)
{
	struct kmmio_context *ctx;
	struct kmmio_fault_page *faultpage;
	int ret = 0; /* default to fault not handled */

	/*
	 * Preemption is now disabled to prevent process switch during
	 * single stepping. We can only handle one active kmmio trace
	 * per cpu, so ensure that we finish it before something else
	 * gets to run. We also hold the RCU read lock over single
	 * stepping to avoid looking up the probe and kmmio_fault_page
	 * again.
	 */
	preempt_disable();
	rcu_read_lock();

	faultpage = get_kmmio_fault_page(addr);
	if (!faultpage) {
		/*
		 * Either this page fault is not caused by kmmio, or
		 * another CPU just pulled the kmmio probe from under
		 * our feet. The latter case should not be possible.
		 */
		goto no_kmmio;
	}

	ctx = &get_cpu_var(kmmio_ctx);
	if (ctx->active) {
		if (addr == ctx->addr) {
			/*
			 * A second fault on the same page means some other
			 * condition needs handling by do_page_fault(), the
			 * page really not being present is the most common.
			 */
			pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
					addr, smp_processor_id());

			if (!faultpage->old_presence)
				pr_info("kmmio: unexpected secondary hit for "
					"address 0x%08lx on CPU %d.\n", addr,
					smp_processor_id());
		} else {
			/*
			 * Prevent overwriting already in-flight context.
			 * This should not happen, let's hope disarming at
			 * least prevents a panic.
			 */
			pr_emerg("kmmio: recursive probe hit on CPU %d, "
					"for address 0x%08lx. Ignoring.\n",
					smp_processor_id(), addr);
			pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
						ctx->addr);
			disarm_kmmio_fault_page(faultpage);
		}
		goto no_kmmio_ctx;
	}
	ctx->active++;

	ctx->fpage = faultpage;
	ctx->probe = get_kmmio_probe(addr);
	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	ctx->addr = addr;

	if (ctx->probe && ctx->probe->pre_handler)
		ctx->probe->pre_handler(ctx->probe, regs, addr);

	/*
	 * Enable single-stepping and disable interrupts for the faulting
	 * context. Local interrupts must not get enabled during stepping.
	 */
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;

	/* Now we set present bit in PTE and single step. */
	disarm_kmmio_fault_page(ctx->fpage);

	/*
	 * If another cpu accesses the same page while we are stepping,
	 * the access will not be caught. It will simply succeed and the
	 * only downside is we lose the event. If this becomes a problem,
	 * the user should drop to single cpu before tracing.
	 */

	put_cpu_var(kmmio_ctx);
	return 1; /* fault handled */

no_kmmio_ctx:
	put_cpu_var(kmmio_ctx);
no_kmmio:
	rcu_read_unlock();
	preempt_enable_no_resched();
	return ret;
}

/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate
 * and they remain disabled throughout this function.
 * This must always get called as the pair to kmmio_handler().
 */
static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
{
	int ret = 0;
	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);

	if (!ctx->active) {
		pr_warning("kmmio: spurious debug trap on CPU %d.\n",
							smp_processor_id());
		goto out;
	}

	if (ctx->probe && ctx->probe->post_handler)
		ctx->probe->post_handler(ctx->probe, condition, regs);

	/* Prevent racing against release_kmmio_fault_page(). */
	spin_lock(&kmmio_lock);
	if (ctx->fpage->count)
		arm_kmmio_fault_page(ctx->fpage);
	spin_unlock(&kmmio_lock);

	regs->flags &= ~X86_EFLAGS_TF;
	regs->flags |= ctx->saved_flags;

	/* These were acquired in kmmio_handler(). */
	ctx->active--;
	BUG_ON(ctx->active);
	rcu_read_unlock();
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (!(regs->flags & X86_EFLAGS_TF))
		ret = 1;
out:
	put_cpu_var(kmmio_ctx);
	return ret;
}

/* You must be holding kmmio_lock. */
static int add_kmmio_fault_page(unsigned long page)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (f) {
		if (!f->count)
			arm_kmmio_fault_page(f);
		f->count++;
		return 0;
	}

	f = kzalloc(sizeof(*f), GFP_ATOMIC);
	if (!f)
		return -1;

	f->count = 1;
	f->page = page;

	if (arm_kmmio_fault_page(f)) {
		kfree(f);
		return -1;
	}

	list_add_rcu(&f->list, kmmio_page_list(f->page));

	return 0;
}

/* You must be holding kmmio_lock. */
static void release_kmmio_fault_page(unsigned long page,
				struct kmmio_fault_page **release_list)
{
	struct kmmio_fault_page *f;

	page &= PAGE_MASK;
	f = get_kmmio_fault_page(page);
	if (!f)
		return;

	f->count--;
	BUG_ON(f->count < 0);
	if (!f->count) {
		disarm_kmmio_fault_page(f);
		f->release_next = *release_list;
		*release_list = f;
	}
}

/*
 * With page-unaligned ioremaps, one or two armed pages may contain
 * addresses from outside the intended mapping. Events for these addresses
 * are currently silently dropped. The events may result only from programming
 * mistakes by accessing addresses before the beginning or past the end of a
 * mapping.
 */
int register_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	int ret = 0;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);

	spin_lock_irqsave(&kmmio_lock, flags);
	if (get_kmmio_probe(p->addr)) {
		ret = -EEXIST;
		goto out;
	}
	kmmio_count++;
	list_add_rcu(&p->list, &kmmio_probes);
	while (size < size_lim) {
		if (add_kmmio_fault_page(p->addr + size))
			pr_err("kmmio: Unable to set page fault.\n");
		size += PAGE_SIZE;
	}
out:
	spin_unlock_irqrestore(&kmmio_lock, flags);
	/*
	 * XXX: What should I do here?
	 * Here was a call to global_flush_tlb(), but it does not exist
	 * anymore. It seems it's not needed after all.
	 */
	return ret;
}
EXPORT_SYMBOL(register_kmmio_probe);
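
/*
 * A minimal usage sketch, not part of this file: struct kmmio_probe itself
 * is declared in <linux/mmiotrace.h>; the fields and handler signatures
 * below are written the way this file uses them, and my_pre(), my_post()
 * and my_iomem are hypothetical names.
 *
 *	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
 *						unsigned long addr)
 *	{
 *		pr_info("mmio access at 0x%08lx\n", addr);
 *	}
 *
 *	static void my_post(struct kmmio_probe *p, unsigned long condition,
 *						struct pt_regs *regs)
 *	{
 *		pr_info("mmio access done\n");
 *	}
 *
 *	static struct kmmio_probe my_probe;
 *
 *	my_probe.addr = (unsigned long)my_iomem; // an ioremap()'d address
 *	my_probe.len = PAGE_SIZE;
 *	my_probe.pre_handler = my_pre;
 *	my_probe.post_handler = my_post;
 *	if (register_kmmio_probe(&my_probe))
 *		pr_err("kmmio probe registration failed\n");
 */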

static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr = container_of(
						head,
						struct kmmio_delayed_release,
						rcu);
	struct kmmio_fault_page *p = dr->release_list;
	while (p) {
		struct kmmio_fault_page *next = p->release_next;
		BUG_ON(p->count);
		kfree(p);
		p = next;
	}
	kfree(dr);
}

static void remove_kmmio_fault_pages(struct rcu_head *head)
{
	struct kmmio_delayed_release *dr =
		container_of(head, struct kmmio_delayed_release, rcu);
	struct kmmio_fault_page *p = dr->release_list;
	struct kmmio_fault_page **prevp = &dr->release_list;
	unsigned long flags;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (p) {
		if (!p->count) {
			list_del_rcu(&p->list);
			prevp = &p->release_next;
		} else {
			*prevp = p->release_next;
		}
		p = p->release_next;
	}
	spin_unlock_irqrestore(&kmmio_lock, flags);

	/* This is the real RCU destroy call. */
	call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
}

/*
 * Remove a kmmio probe. You have to synchronize_rcu() before you can be
 * sure that the callbacks will not be called anymore. Only after that
 * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
 *    Disarm the page, wait a grace period to let all faults finish.
 * 2. remove_kmmio_fault_pages()
 *    Remove the pages from kmmio_page_table.
 * 3. rcu_free_kmmio_fault_pages()
 *    Actually free the kmmio_fault_page structs, as allowed by RCU.
 */
void unregister_kmmio_probe(struct kmmio_probe *p)
{
	unsigned long flags;
	unsigned long size = 0;
	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
	struct kmmio_fault_page *release_list = NULL;
	struct kmmio_delayed_release *drelease;

	spin_lock_irqsave(&kmmio_lock, flags);
	while (size < size_lim) {
		release_kmmio_fault_page(p->addr + size, &release_list);
		size += PAGE_SIZE;
	}
	list_del_rcu(&p->list);
	kmmio_count--;
	spin_unlock_irqrestore(&kmmio_lock, flags);

	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
	if (!drelease) {
		pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
		return;
	}
	drelease->release_list = release_list;

	/*
	 * This is not really RCU here. We have just disarmed a set of
	 * pages so that they cannot trigger page faults anymore. However,
	 * we cannot remove the pages from kmmio_page_table,
	 * because a probe hit might be in flight on another CPU. The
	 * pages are collected into a list, and they will be removed from
	 * kmmio_page_table when it is certain that no probe hit related to
	 * these pages can be in flight. RCU grace period sounds like a
	 * good choice.
	 *
	 * If we removed the pages too early, kmmio page fault handler might
	 * not find the respective kmmio_fault_page and determine it's not
	 * a kmmio fault, when it actually is. This would lead to madness.
	 */
	call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
}
EXPORT_SYMBOL(unregister_kmmio_probe);
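
/*
 * Matching teardown sketch for the registration example above (my_probe is
 * the same hypothetical probe): after unregistering, wait one RCU grace
 * period before the probe structure is reused or freed, as required by the
 * comment above unregister_kmmio_probe().
 *
 *	unregister_kmmio_probe(&my_probe);
 *	synchronize_rcu();
 *	// my_probe may now be reused or released
 */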

static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
								void *args)
{
	struct die_args *arg = args;

	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;

	return NOTIFY_DONE;
}

static struct notifier_block nb_die = {
	.notifier_call = kmmio_die_notifier
};

static int __init init_kmmio(void)
{
	int i;
	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&kmmio_page_table[i]);
	return register_die_notifier(&nb_die);
}
fs_initcall(init_kmmio); /* should be before device_initcall() */