/*
 * Debug Store support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - DS and BTS hardware configuration
 * - buffer overflow handling (to be done)
 * - buffer access
 *
 * It does not do:
 * - security checking (is the caller allowed to trace the task)
 * - buffer allocation (memory accounting)
 *
 *
 * Copyright (C) 2007-2009 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2009
 */

23 24 25 26 27
#include <asm/ds.h>

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
M
Markus Metzger 已提交
28
#include <linux/sched.h>
I
Ingo Molnar 已提交
29
#include <linux/mm.h>
30
#include <linux/kernel.h>
M
Markus Metzger 已提交
31 32 33 34 35 36


/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
37 38 39 40 41
	/* the name of the configuration */
	const char *name;
	/* the size of one pointer-typed field in the DS structure and
	   in the BTS and PEBS buffers in bytes;
	   this covers the first 8 DS fields related to buffer management. */
M
Markus Metzger 已提交
42 43 44
	unsigned char  sizeof_field;
	/* the size of a BTS/PEBS record in bytes */
	unsigned char  sizeof_rec[2];
45 46 47
	/* a series of bit-masks to control various features indexed
	 * by enum ds_feature */
	unsigned long ctl[dsf_ctl_max];
M
Markus Metzger 已提交
48
};
49 50 51 52 53 54 55 56 57 58 59 60
static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array);

#define ds_cfg per_cpu(ds_cfg_array, smp_processor_id())

#define MAX_SIZEOF_DS (12 * 8)	/* maximal size of a DS configuration */
#define MAX_SIZEOF_BTS (3 * 8)	/* maximal size of a BTS record */
#define DS_ALIGNMENT (1 << 3)	/* BTS and PEBS buffer alignment */

#define BTS_CONTROL \
 (ds_cfg.ctl[dsf_bts] | ds_cfg.ctl[dsf_bts_kernel] | ds_cfg.ctl[dsf_bts_user] |\
  ds_cfg.ctl[dsf_bts_overflow])

61

62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
/*
 * A BTS or PEBS tracer.
 *
 * This holds the configuration of the tracer and serves as a handle
 * to identify tracers.
 */
/* State shared between BTS and PEBS tracers. */
struct ds_tracer {
	/* the DS context (partially) owned by this tracer */
	struct ds_context *context;
	/* the buffer provided on ds_request() and its size in bytes */
	void *buffer;
	size_t size;
};

struct bts_tracer {
	/* the common DS part */
	struct ds_tracer ds;
79 80
	/* the trace including the DS configuration */
	struct bts_trace trace;
81 82 83 84 85 86 87
	/* buffer overflow notification function */
	bts_ovfl_callback_t ovfl;
};

struct pebs_tracer {
	/* the common DS part */
	struct ds_tracer ds;
88 89
	/* the trace including the DS configuration */
	struct pebs_trace trace;
90 91 92
	/* buffer overflow notification function */
	pebs_ovfl_callback_t ovfl;
};
93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113

/*
 * Debug Store (DS) save area configuration (see Intel64 and IA32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architetures vary in the size of those fields.
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which counter is reset following counter overflow
 *
M
Markus Metzger 已提交
114 115
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
116 117
 *
 *
M
Markus Metzger 已提交
118 119 120 121
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
122
 *
M
Markus Metzger 已提交
123 124
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
125
 *
M
Markus Metzger 已提交
126 127
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
128 129
 */

M
Markus Metzger 已提交
130 131 132 133 134 135
/* The four buffer-management fields kept per buffer type in the DS
 * save area; used as the 'field' argument of ds_get()/ds_set(). */
enum ds_field {
	ds_buffer_base = 0,	/* linear base address of the buffer */
	ds_index,		/* current write pointer */
	ds_absolute_maximum,	/* one byte beyond the end of the buffer */
	ds_interrupt_threshold,	/* overflow interrupt trigger point */
};
136

M
Markus Metzger 已提交
137 138 139
/* Selects the BTS or PEBS part of the DS save area; also indexes
 * ds_cfg.sizeof_rec[]. */
enum ds_qualifier {
	ds_bts  = 0,
	ds_pebs
};

M
Markus Metzger 已提交
142 143 144 145 146 147 148 149 150 151 152 153 154 155 156
/* Read one buffer-management field from the DS save area at 'base';
 * each qualifier (BTS first, then PEBS) owns 4 pointer-sized fields. */
static inline unsigned long ds_get(const unsigned char *base,
				   enum ds_qualifier qual, enum ds_field field)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	return *(unsigned long *)base;
}

/* Write one buffer-management field into the DS save area at 'base';
 * offset computation mirrors ds_get(). */
static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
			  enum ds_field field, unsigned long value)
{
	base += (ds_cfg.sizeof_field * (field + (4 * qual)));
	(*(unsigned long *)base) = value;
}


157
/*
158
 * Locking is done only for allocating BTS or PEBS resources.
159
 */
160
static DEFINE_SPINLOCK(ds_lock);
161 162 163


/*
M
Markus Metzger 已提交
164 165 166 167 168 169 170 171 172 173 174 175 176 177
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *   =0  no tracers
 *   >0  number of per-thread tracers
 *   <0  number of per-cpu tracers
 *
 * Tracers essentially gives the number of ds contexts for a certain
 * type of allocation.
178
 */
179
static atomic_t tracers = ATOMIC_INIT(0);
M
Markus Metzger 已提交
180 181

static inline void get_tracer(struct task_struct *task)
182
{
183 184 185 186
	if (task)
		atomic_inc(&tracers);
	else
		atomic_dec(&tracers);
187
}
M
Markus Metzger 已提交
188 189

static inline void put_tracer(struct task_struct *task)
190
{
191 192 193 194
	if (task)
		atomic_dec(&tracers);
	else
		atomic_inc(&tracers);
195
}
M
Markus Metzger 已提交
196 197

static inline int check_tracer(struct task_struct *task)
198
{
199 200 201
	return task ?
		(atomic_read(&tracers) >= 0) :
		(atomic_read(&tracers) <= 0);
202
}
M
Markus Metzger 已提交
203 204 205 206 207 208 209 210 211 212 213 214


/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 */
215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231
struct ds_context {
	/* pointer to the DS configuration; goes into MSR_IA32_DS_AREA */
	unsigned char ds[MAX_SIZEOF_DS];
	/* the owner of the BTS and PEBS configuration, respectively */
	struct bts_tracer *bts_master;
	struct pebs_tracer *pebs_master;
	/* use count; modified under ds_lock in ds_get/put_context() */
	unsigned long count;
	/* a pointer to the context location inside the thread_struct
	 * or the per_cpu context array */
	struct ds_context **this;
	/* a pointer to the task owning this context, or NULL, if the
	 * context is owned by a cpu */
	struct task_struct *task;
};

static DEFINE_PER_CPU(struct ds_context *, system_context_array);
M
Markus Metzger 已提交
232

233
#define system_context per_cpu(system_context_array, smp_processor_id())
M
Markus Metzger 已提交
234

235 236

static inline struct ds_context *ds_get_context(struct task_struct *task)
237
{
M
Markus Metzger 已提交
238
	struct ds_context **p_context =
239
		(task ? &task->thread.ds_ctx : &system_context);
240 241
	struct ds_context *context = NULL;
	struct ds_context *new_context = NULL;
242
	unsigned long irq;
M
Markus Metzger 已提交
243

244 245 246 247
	/* Chances are small that we already have a context. */
	new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
	if (!new_context)
		return NULL;
248

249
	spin_lock_irqsave(&ds_lock, irq);
M
Markus Metzger 已提交
250

251 252 253
	context = *p_context;
	if (!context) {
		context = new_context;
M
Markus Metzger 已提交
254

255 256 257
		context->this = p_context;
		context->task = task;
		context->count = 0;
M
Markus Metzger 已提交
258

259 260
		if (task)
			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
261

262 263
		if (!task || (task == current))
			wrmsrl(MSR_IA32_DS_AREA, (unsigned long)context->ds);
264

265 266
		*p_context = context;
	}
267

268
	context->count++;
269

270
	spin_unlock_irqrestore(&ds_lock, irq);
M
Markus Metzger 已提交
271

272 273
	if (context != new_context)
		kfree(new_context);
M
Markus Metzger 已提交
274 275

	return context;
276
}
M
Markus Metzger 已提交
277 278

static inline void ds_put_context(struct ds_context *context)
279
{
280 281
	unsigned long irq;

M
Markus Metzger 已提交
282 283 284
	if (!context)
		return;

285
	spin_lock_irqsave(&ds_lock, irq);
M
Markus Metzger 已提交
286

287 288 289 290
	if (--context->count) {
		spin_unlock_irqrestore(&ds_lock, irq);
		return;
	}
M
Markus Metzger 已提交
291

292
	*(context->this) = NULL;
M
Markus Metzger 已提交
293 294 295 296 297 298 299

	if (context->task)
		clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);

	if (!context->task || (context->task == current))
		wrmsrl(MSR_IA32_DS_AREA, 0);

300
	spin_unlock_irqrestore(&ds_lock, irq);
301 302

	kfree(context);
303
}
M
Markus Metzger 已提交
304 305 306


/*
307
 * Call the tracer's callback on a buffer overflow.
M
Markus Metzger 已提交
308 309 310 311
 *
 * context: the ds context
 * qual: the buffer type
 */
312 313 314
static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
{
	switch (qual) {
315 316 317 318 319 320 321 322 323
	case ds_bts:
		if (context->bts_master &&
		    context->bts_master->ovfl)
			context->bts_master->ovfl(context->bts_master);
		break;
	case ds_pebs:
		if (context->pebs_master &&
		    context->pebs_master->ovfl)
			context->pebs_master->ovfl(context->pebs_master);
324 325
		break;
	}
326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
}


/*
 * Write raw data into the BTS or PEBS buffer.
 *
 * The remainder of any partially written record is zeroed out.
 *
 * context: the DS context
 * qual: the buffer type
 * record: the data to write
 * size: the size of the data
 */
static int ds_write(struct ds_context *context, enum ds_qualifier qual,
		    const void *record, size_t size)
{
	int bytes_written = 0;

	if (!record)
		return -EINVAL;

	while (size) {
		unsigned long base, index, end, write_end, int_th;
		unsigned long write_size, adj_write_size;

		/*
		 * write as much as possible without producing an
		 * overflow interrupt.
		 *
		 * interrupt_threshold must either be
		 * - bigger than absolute_maximum or
		 * - point to a record between buffer_base and absolute_maximum
		 *
		 * index points to a valid record.
		 */
		base   = ds_get(context->ds, qual, ds_buffer_base);
		index  = ds_get(context->ds, qual, ds_index);
		end    = ds_get(context->ds, qual, ds_absolute_maximum);
		int_th = ds_get(context->ds, qual, ds_interrupt_threshold);

		write_end = min(end, int_th);

		/* if we are already beyond the interrupt threshold,
		 * we fill the entire buffer */
		if (write_end <= index)
			write_end = end;

		/* no room left at all: give up */
		if (write_end <= index)
			break;

		write_size = min((unsigned long) size, write_end - index);
		memcpy((void *)index, record, write_size);

		record = (const char *)record + write_size;
		size -= write_size;
		bytes_written += write_size;

		/* advance the index by whole records only */
		adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
		adj_write_size *= ds_cfg.sizeof_rec[qual];

		/* zero out trailing bytes */
		memset((char *)index + write_size, 0,
		       adj_write_size - write_size);
		index += adj_write_size;

		/* wrap around to the buffer base */
		if (index >= end)
			index = base;
		ds_set(context->ds, qual, ds_index, index);

		/* notify the tracer if we crossed the threshold */
		if (index >= int_th)
			ds_overflow(context, qual);
	}

	return bytes_written;
}


/*
 * Branch Trace Store (BTS) uses the following format. Different
 * architectures vary in the size of those fields.
 * - source linear address
 * - destination linear address
 * - flags
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 *
 * In order to store additional information in the BTS buffer, we use
 * a special source address to indicate that the record requires
 * special interpretation.
 *
 * Netburst indicated via a bit in the flags field whether the branch
 * was predicted; this is ignored.
 *
 * We use two levels of abstraction:
 * - the raw data level defined here
 * - an arch-independent level defined in ds.h
 */

enum bts_field {
	bts_from,		/* branch source address */
	bts_to,			/* branch destination address */
	bts_flags,		/* hardware flags (ignored) */

	/* escape records reuse the same three slots */
	bts_qual = bts_from,
	bts_jiffies = bts_to,
	bts_pid = bts_flags,

	bts_qual_mask = (bts_qual_max - 1),
	/* all-ones source address (outside the qualifier mask) marks
	 * a record that needs special interpretation */
	bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
};

/* Read one field of a raw BTS record at 'base'. */
static inline unsigned long bts_get(const char *base, enum bts_field field)
{
	return *(unsigned long *)(base + (ds_cfg.sizeof_field * field));
}

/* Write one field of a raw BTS record at 'base'.
 * Fix: drop the stray double semicolon after the offset computation. */
static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
	base += (ds_cfg.sizeof_field * field);
	(*(unsigned long *)base) = val;
}


/*
 * The raw BTS data is architecture dependent.
 *
 * For higher-level users, we give an arch-independent view.
 * - ds.h defines struct bts_struct
 * - bts_read translates one raw bts record into a bts_struct
 * - bts_write translates one bts_struct into the raw format and
 *   writes it into the top of the parameter tracer's buffer.
 *
 * return: bytes read/written on success; -Eerrno, otherwise
 */
static int bts_read(struct bts_tracer *tracer, const void *at,
		    struct bts_struct *out)
{
	if (!tracer)
		return -EINVAL;

	if (at < tracer->trace.ds.begin)
		return -EINVAL;

	if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
		return -EINVAL;

	memset(out, 0, sizeof(*out));
	if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
		out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
		out->variant.timestamp.jiffies = bts_get(at, bts_jiffies);
		out->variant.timestamp.pid = bts_get(at, bts_pid);
	} else {
		out->qualifier = bts_branch;
		out->variant.lbr.from = bts_get(at, bts_from);
		out->variant.lbr.to   = bts_get(at, bts_to);
487 488 489

		if (!out->variant.lbr.from && !out->variant.lbr.to)
			out->qualifier = bts_invalid;
490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520
	}

	return ds_cfg.sizeof_rec[ds_bts];
}

static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
{
	unsigned char raw[MAX_SIZEOF_BTS];

	if (!tracer)
		return -EINVAL;

	if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
		return -EOVERFLOW;

	switch (in->qualifier) {
	case bts_invalid:
		bts_set(raw, bts_from, 0);
		bts_set(raw, bts_to, 0);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_branch:
		bts_set(raw, bts_from, in->variant.lbr.from);
		bts_set(raw, bts_to,   in->variant.lbr.to);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_task_arrives:
	case bts_task_departs:
		bts_set(raw, bts_qual, (bts_escape | in->qualifier));
		bts_set(raw, bts_jiffies, in->variant.timestamp.jiffies);
		bts_set(raw, bts_pid, in->variant.timestamp.pid);
521
		break;
522 523
	default:
		return -EINVAL;
524
	}
525 526 527

	return ds_write(tracer->ds.context, ds_bts, raw,
			ds_cfg.sizeof_rec[ds_bts]);
528
}
M
Markus Metzger 已提交
529 530


531 532 533 534 535 536 537 538 539 540 541 542 543
/* Copy a ds_trace configuration into the hardware DS save area. */
static void ds_write_config(struct ds_context *context,
			    struct ds_trace *cfg, enum ds_qualifier qual)
{
	unsigned char *ds = context->ds;

	ds_set(ds, qual, ds_buffer_base, (unsigned long)cfg->begin);
	ds_set(ds, qual, ds_index, (unsigned long)cfg->top);
	ds_set(ds, qual, ds_absolute_maximum, (unsigned long)cfg->end);
	ds_set(ds, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
}

static void ds_read_config(struct ds_context *context,
			   struct ds_trace *cfg, enum ds_qualifier qual)
544
{
545 546 547 548 549 550 551 552 553 554 555
	unsigned char *ds = context->ds;

	cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
	cfg->top = (void *)ds_get(ds, qual, ds_index);
	cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum);
	cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold);
}

/*
 * Initialize a ds_trace from a raw buffer: align the start, trim the
 * size to whole records, and place the interrupt threshold.
 */
static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
			     void *base, size_t size, size_t ith,
			     unsigned int flags) {
	unsigned long start, skip;

	/* adjust the buffer address and size to meet alignment
	 * constraints:
	 * - buffer is double-word aligned
	 * - size is multiple of record size
	 *
	 * The caller checked at the very beginning that there is
	 * enough space to do the adjustment. */
	start = (unsigned long)base;

	skip = ALIGN(start, DS_ALIGNMENT) - start;
	start += skip;
	size  -= skip;

	trace->n = size / ds_cfg.sizeof_rec[qual];
	trace->size = ds_cfg.sizeof_rec[qual];

	/* usable size: whole records only */
	size = (trace->n * trace->size);

	trace->begin = (void *)start;
	trace->top = trace->begin;
	trace->end = (void *)(start + size);
	/* The value for 'no threshold' is -1, which places the
	 * threshold outside of the buffer, just like we want it. */
	trace->ith = (void *)(start + size - ith);

	trace->flags = flags;
}

588 589 590 591

/*
 * Common part of ds_request_bts()/ds_request_pebs(): validate the
 * buffer, acquire a DS context, and initialize the trace layout.
 *
 * Returns 0 on success; a negative error code otherwise.
 */
static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
		      enum ds_qualifier qual, struct task_struct *task,
		      void *base, size_t size, size_t th, unsigned int flags)
{
	struct ds_context *context;
	int error;

	error = -EINVAL;
	if (!base)
		goto out;

	/* we require some space to do alignment adjustments below */
	if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
		goto out;

	if (th != (size_t)-1) {
		/* convert the threshold from records to bytes */
		th *= ds_cfg.sizeof_rec[qual];

		if (size <= th)
			goto out;
	}

	tracer->buffer = base;
	tracer->size = size;

	error = -ENOMEM;
	context = ds_get_context(task);
	if (!context)
		goto out;
	tracer->context = context;

	ds_init_ds_trace(trace, qual, base, size, th, flags);

	error = 0;
 out:
	return error;
}
M
Markus Metzger 已提交
628

629 630
struct bts_tracer *ds_request_bts(struct task_struct *task,
				  void *base, size_t size,
631 632
				  bts_ovfl_callback_t ovfl, size_t th,
				  unsigned int flags)
633
{
634
	struct bts_tracer *tracer;
635
	unsigned long irq;
636
	int error;
M
Markus Metzger 已提交
637

638 639 640 641
	error = -EOPNOTSUPP;
	if (!ds_cfg.ctl[dsf_bts])
		goto out;

642 643 644 645 646 647 648 649 650 651 652
	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

653 654
	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_bts, task, base, size, th, flags);
655 656 657
	if (error < 0)
		goto out_tracer;

658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->bts_master)
		goto out_put_tracer;
	tracer->ds.context->bts_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);


	tracer->trace.read  = bts_read;
	tracer->trace.write = bts_write;

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	ds_resume_bts(tracer);

680 681
	return tracer;

682 683 684 685 686
 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
687
 out_tracer:
688
	kfree(tracer);
689 690
 out:
	return ERR_PTR(error);
691
}
M
Markus Metzger 已提交
692

693 694
struct pebs_tracer *ds_request_pebs(struct task_struct *task,
				    void *base, size_t size,
695 696
				    pebs_ovfl_callback_t ovfl, size_t th,
				    unsigned int flags)
697
{
698
	struct pebs_tracer *tracer;
699
	unsigned long irq;
M
Markus Metzger 已提交
700 701
	int error;

702 703 704
	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
M
Markus Metzger 已提交
705 706
		goto out;

707 708 709 710 711
	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;
M
Markus Metzger 已提交
712

713 714
	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_pebs, task, base, size, th, flags);
715 716
	if (error < 0)
		goto out_tracer;
M
Markus Metzger 已提交
717

718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734
	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->pebs_master)
		goto out_put_tracer;
	tracer->ds.context->pebs_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	ds_resume_pebs(tracer);

735 736
	return tracer;

737 738 739 740 741
 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
742
 out_tracer:
743
	kfree(tracer);
M
Markus Metzger 已提交
744
 out:
745 746 747
	return ERR_PTR(error);
}

748
void ds_release_bts(struct bts_tracer *tracer)
749
{
750
	if (!tracer)
751
		return;
752

753
	ds_suspend_bts(tracer);
754

755 756
	WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
	tracer->ds.context->bts_master = NULL;
M
Markus Metzger 已提交
757

758 759
	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);
760 761

	kfree(tracer);
762
}
M
Markus Metzger 已提交
763

764
void ds_suspend_bts(struct bts_tracer *tracer)
765
{
766
	struct task_struct *task;
767 768

	if (!tracer)
769
		return;
770

771
	task = tracer->ds.context->task;
772

773 774
	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() & ~BTS_CONTROL);
775

776 777
	if (task) {
		task->thread.debugctlmsr &= ~BTS_CONTROL;
778

779 780 781
		if (!task->thread.debugctlmsr)
			clear_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}
M
Markus Metzger 已提交
782
}
783

784
void ds_resume_bts(struct bts_tracer *tracer)
M
Markus Metzger 已提交
785
{
786 787
	struct task_struct *task;
	unsigned long control;
788

789
	if (!tracer)
790
		return;
791

792
	task = tracer->ds.context->task;
793

794 795 796 797 798
	control = ds_cfg.ctl[dsf_bts];
	if (!(tracer->trace.ds.flags & BTS_KERNEL))
		control |= ds_cfg.ctl[dsf_bts_kernel];
	if (!(tracer->trace.ds.flags & BTS_USER))
		control |= ds_cfg.ctl[dsf_bts_user];
799

800 801 802 803
	if (task) {
		task->thread.debugctlmsr |= control;
		set_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}
804

805 806
	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() | control);
807 808
}

809
void ds_release_pebs(struct pebs_tracer *tracer)
810
{
811
	if (!tracer)
812
		return;
M
Markus Metzger 已提交
813

814
	ds_suspend_pebs(tracer);
M
Markus Metzger 已提交
815

816 817
	WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
	tracer->ds.context->pebs_master = NULL;
818

819 820
	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);
821

822
	kfree(tracer);
M
Markus Metzger 已提交
823 824
}

825
void ds_suspend_pebs(struct pebs_tracer *tracer)
{
	/* not yet implemented */
}
829

830
void ds_resume_pebs(struct pebs_tracer *tracer)
{
	/* not yet implemented */
}

835
const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
836
{
837
	if (!tracer)
838
		return NULL;
839

840 841
	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	return &tracer->trace;
M
Markus Metzger 已提交
842
}
843

844
const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
M
Markus Metzger 已提交
845
{
846
	if (!tracer)
847
		return NULL;
848

849 850 851
	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
	tracer->trace.reset_value =
		*(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8));
852

853
	return &tracer->trace;
M
Markus Metzger 已提交
854
}
855

856
int ds_reset_bts(struct bts_tracer *tracer)
M
Markus Metzger 已提交
857
{
858 859 860
	if (!tracer)
		return -EINVAL;

861
	tracer->trace.ds.top = tracer->trace.ds.begin;
862

863 864
	ds_set(tracer->ds.context->ds, ds_bts, ds_index,
	       (unsigned long)tracer->trace.ds.top);
865 866

	return 0;
M
Markus Metzger 已提交
867
}
868

869
int ds_reset_pebs(struct pebs_tracer *tracer)
M
Markus Metzger 已提交
870
{
871 872
	if (!tracer)
		return -EINVAL;
873

874
	tracer->trace.ds.top = tracer->trace.ds.begin;
875

876 877
	ds_set(tracer->ds.context->ds, ds_bts, ds_index,
	       (unsigned long)tracer->trace.ds.top);
M
Markus Metzger 已提交
878

879
	return 0;
880 881
}

882
/*
 * Set the value the performance counter is reloaded with after a
 * PEBS-armed counter overflow.
 */
int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
{
	if (!tracer)
		return -EINVAL;

	/* the counter reset value is the 9th pointer-sized DS field */
	*(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8)) = value;

	return 0;
}

892
static const struct ds_configuration ds_cfg_netburst = {
893
	.name = "Netburst",
894 895 896 897 898 899
	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
	.ctl[dsf_bts_kernel]	= (1 << 5),
	.ctl[dsf_bts_user]	= (1 << 6),

	.sizeof_field		= sizeof(long),
	.sizeof_rec[ds_bts]	= sizeof(long) * 3,
900
#ifdef __i386__
901
	.sizeof_rec[ds_pebs]	= sizeof(long) * 10,
902
#else
903
	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
904
#endif
905
};
906
static const struct ds_configuration ds_cfg_pentium_m = {
907
	.name = "Pentium M",
908 909 910 911
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),

	.sizeof_field		= sizeof(long),
	.sizeof_rec[ds_bts]	= sizeof(long) * 3,
912
#ifdef __i386__
913
	.sizeof_rec[ds_pebs]	= sizeof(long) * 10,
914
#else
915
	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
916
#endif
917
};
918 919
static const struct ds_configuration ds_cfg_core2_atom = {
	.name = "Core 2/Atom",
920 921 922 923 924 925 926 927
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
	.ctl[dsf_bts_kernel]	= (1 << 9),
	.ctl[dsf_bts_user]	= (1 << 10),

	.sizeof_field		= 8,
	.sizeof_rec[ds_bts]	= 8 * 3,
	.sizeof_rec[ds_pebs]	= 8 * 18,
};
928

929
static void
930 931
ds_configure(const struct ds_configuration *cfg)
{
932
	memset(&ds_cfg, 0, sizeof(ds_cfg));
933
	ds_cfg = *cfg;
934

935 936 937 938 939 940 941 942
	printk(KERN_INFO "[ds] using %s configuration\n", ds_cfg.name);

	if (!cpu_has_bts) {
		ds_cfg.ctl[dsf_bts] = 0;
		printk(KERN_INFO "[ds] bts not available\n");
	}
	if (!cpu_has_pebs)
		printk(KERN_INFO "[ds] pebs not available\n");
943

944
	WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_field));
945 946 947 948 949 950 951
}

/*
 * Pick the DS configuration matching the cpu's family/model; cpus we
 * do not know about are left unconfigured.
 */
void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0x9:
		case 0xd: /* Pentium M */
			ds_configure(&ds_cfg_pentium_m);
			break;
		case 0xf:
		case 0x17: /* Core2 */
		case 0x1c: /* Atom */
			ds_configure(&ds_cfg_core2_atom);
			break;
		case 0x1a: /* i7 */
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	case 0xf:
		switch (c->x86_model) {
		case 0x0:
		case 0x1:
		case 0x2: /* Netburst */
			ds_configure(&ds_cfg_netburst);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}
M
Markus Metzger 已提交
984

985 986 987 988
/*
 * Change the DS configuration from tracing prev to tracing next.
 */
void ds_switch_to(struct task_struct *prev, struct task_struct *next)
M
Markus Metzger 已提交
989
{
990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018
	struct ds_context *prev_ctx = prev->thread.ds_ctx;
	struct ds_context *next_ctx = next->thread.ds_ctx;

	if (prev_ctx) {
		update_debugctlmsr(0);

		if (prev_ctx->bts_master &&
		    (prev_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_departs,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = prev->pid
			};
			bts_write(prev_ctx->bts_master, &ts);
		}
	}

	if (next_ctx) {
		if (next_ctx->bts_master &&
		    (next_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_arrives,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = next->pid
			};
			bts_write(next_ctx->bts_master, &ts);
		}

		wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
1019
	}
1020 1021

	update_debugctlmsr(next->thread.debugctlmsr);
M
Markus Metzger 已提交
1022
}
1023 1024 1025 1026 1027 1028 1029 1030 1031 1032

/* A forked task does not inherit its parent's DS context. */
void ds_copy_thread(struct task_struct *tsk, struct task_struct *father)
{
	clear_tsk_thread_flag(tsk, TIF_DS_AREA_MSR);
	tsk->thread.ds_ctx = NULL;
}

/* Nothing to do on thread exit; tracers release their context
 * themselves via ds_release_bts()/ds_release_pebs(). */
void ds_exit_thread(struct task_struct *tsk)
{
}