/*
 * Debug Store support
 *
 * This provides a low-level interface to the hardware's Debug Store
 * feature that is used for branch trace store (BTS) and
 * precise-event based sampling (PEBS).
 *
 * It manages:
 * - DS and BTS hardware configuration
 * - buffer overflow handling (to be done)
 * - buffer access
 *
 * It does not do:
 * - security checking (is the caller allowed to trace the task)
 * - buffer allocation (memory accounting)
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation.
 * Markus Metzger <markus.t.metzger@intel.com>, 2007-2008
 */

M
Markus Metzger 已提交
22

23 24 25 26 27
#include <asm/ds.h>

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
M
Markus Metzger 已提交
28
#include <linux/sched.h>
I
Ingo Molnar 已提交
29
#include <linux/mm.h>
30
#include <linux/kernel.h>
M
Markus Metzger 已提交
31 32 33 34 35 36


/*
 * The configuration for a particular DS hardware implementation.
 */
struct ds_configuration {
37 38 39 40 41
	/* the name of the configuration */
	const char *name;
	/* the size of one pointer-typed field in the DS structure and
	   in the BTS and PEBS buffers in bytes;
	   this covers the first 8 DS fields related to buffer management. */
M
Markus Metzger 已提交
42 43 44
	unsigned char  sizeof_field;
	/* the size of a BTS/PEBS record in bytes */
	unsigned char  sizeof_rec[2];
45 46 47
	/* a series of bit-masks to control various features indexed
	 * by enum ds_feature */
	unsigned long ctl[dsf_ctl_max];
M
Markus Metzger 已提交
48
};
49 50 51 52 53 54 55 56 57 58 59 60
/* Per-cpu DS hardware configuration, filled in by ds_configure(). */
static DEFINE_PER_CPU(struct ds_configuration, ds_cfg_array);

/* The DS configuration of the cpu we are currently running on. */
#define ds_cfg per_cpu(ds_cfg_array, smp_processor_id())

#define MAX_SIZEOF_DS (12 * 8)	/* maximal size of a DS configuration */
#define MAX_SIZEOF_BTS (3 * 8)	/* maximal size of a BTS record */
#define DS_ALIGNMENT (1 << 3)	/* BTS and PEBS buffer alignment */

/* All DEBUGCTL MSR bits used for BTS tracing (enable + filter + overflow). */
#define BTS_CONTROL \
 (ds_cfg.ctl[dsf_bts] | ds_cfg.ctl[dsf_bts_kernel] | ds_cfg.ctl[dsf_bts_user] |\
  ds_cfg.ctl[dsf_bts_overflow])

61

62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78
/*
 * A BTS or PEBS tracer.
 *
 * This holds the configuration of the tracer and serves as a handle
 * to identify tracers.
 */
struct ds_tracer {
	/* the DS context (partially) owned by this tracer */
	struct ds_context *context;
	/* the buffer provided on ds_request() and its size in bytes */
	void *buffer;
	size_t size;
};

struct bts_tracer {
	/* the common DS part */
	struct ds_tracer ds;
79 80
	/* the trace including the DS configuration */
	struct bts_trace trace;
81 82 83 84 85 86 87
	/* buffer overflow notification function */
	bts_ovfl_callback_t ovfl;
};

struct pebs_tracer {
	/* the common DS part */
	struct ds_tracer ds;
88 89
	/* the trace including the DS configuration */
	struct pebs_trace trace;
90 91 92
	/* buffer overflow notification function */
	pebs_ovfl_callback_t ovfl;
};
/*
 * Debug Store (DS) save area configuration (see Intel64 and IA32
 * Architectures Software Developer's Manual, section 18.5)
 *
 * The DS configuration consists of the following fields; different
 * architectures vary in the size of those fields.
 * - double-word aligned base linear address of the BTS buffer
 * - write pointer into the BTS buffer
 * - end linear address of the BTS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into BTS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - double-word aligned base linear address of the PEBS buffer
 * - write pointer into the PEBS buffer
 * - end linear address of the PEBS buffer (one byte beyond the end of
 *   the buffer)
 * - interrupt pointer into PEBS buffer
 *   (interrupt occurs when write pointer passes interrupt pointer)
 * - value to which counter is reset following counter overflow
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 * - an offset giving the start of the respective region
 *
 * This offset is further used to index various arrays holding
 * information for BTS and PEBS at the respective index.
 *
 * On later 32bit processors, we only access the lower 32bit of the
 * 64bit pointer fields. The upper halves will be zeroed out.
 */

M
Markus Metzger 已提交
130 131 132 133 134 135
/* Offsets of the buffer-management fields within one DS group. */
enum ds_field {
	ds_buffer_base = 0,
	ds_index,
	ds_absolute_maximum,
	ds_interrupt_threshold,
};

/* Selects either the BTS or the PEBS group of DS fields. */
enum ds_qualifier {
	ds_bts = 0,
	ds_pebs
};

M
Markus Metzger 已提交
142 143 144 145 146 147 148 149 150 151 152 153 154 155 156
/* Read one pointer-sized DS field for the given buffer type. */
static inline unsigned long ds_get(const unsigned char *base,
				   enum ds_qualifier qual, enum ds_field field)
{
	const unsigned char *at =
		base + (ds_cfg.sizeof_field * (field + (4 * qual)));

	return *(unsigned long *)at;
}

/* Write one pointer-sized DS field for the given buffer type. */
static inline void ds_set(unsigned char *base, enum ds_qualifier qual,
			  enum ds_field field, unsigned long value)
{
	unsigned char *at =
		base + (ds_cfg.sizeof_field * (field + (4 * qual)));

	*(unsigned long *)at = value;
}


157
/*
158
 * Locking is done only for allocating BTS or PEBS resources.
159
 */
160
static DEFINE_SPINLOCK(ds_lock);
161 162 163


/*
M
Markus Metzger 已提交
164 165 166 167 168 169 170 171 172 173 174 175 176 177
 * We either support (system-wide) per-cpu or per-thread allocation.
 * We distinguish the two based on the task_struct pointer, where a
 * NULL pointer indicates per-cpu allocation for the current cpu.
 *
 * Allocations are use-counted. As soon as resources are allocated,
 * further allocations must be of the same type (per-cpu or
 * per-thread). We model this by counting allocations (i.e. the number
 * of tracers of a certain type) for one type negatively:
 *   =0  no tracers
 *   >0  number of per-thread tracers
 *   <0  number of per-cpu tracers
 *
 * Tracers essentially gives the number of ds contexts for a certain
 * type of allocation.
178
 */
179
static atomic_t tracers = ATOMIC_INIT(0);
M
Markus Metzger 已提交
180 181

static inline void get_tracer(struct task_struct *task)
182
{
183 184 185 186
	if (task)
		atomic_inc(&tracers);
	else
		atomic_dec(&tracers);
187
}
M
Markus Metzger 已提交
188 189

static inline void put_tracer(struct task_struct *task)
190
{
191 192 193 194
	if (task)
		atomic_dec(&tracers);
	else
		atomic_inc(&tracers);
195
}
M
Markus Metzger 已提交
196 197

static inline int check_tracer(struct task_struct *task)
198
{
199 200 201
	return task ?
		(atomic_read(&tracers) >= 0) :
		(atomic_read(&tracers) <= 0);
202
}
M
Markus Metzger 已提交
203 204 205 206 207 208 209 210 211 212 213 214


/*
 * The DS context is either attached to a thread or to a cpu:
 * - in the former case, the thread_struct contains a pointer to the
 *   attached context.
 * - in the latter case, we use a static array of per-cpu context
 *   pointers.
 *
 * Contexts are use-counted. They are allocated on first access and
 * deallocated when the last user puts the context.
 */
215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231
struct ds_context {
	/* the DS configuration itself; its address goes into
	 * MSR_IA32_DS_AREA */
	unsigned char ds[MAX_SIZEOF_DS];
	/* the single owner of the BTS part, if any */
	struct bts_tracer *bts_master;
	/* the single owner of the PEBS part, if any */
	struct pebs_tracer *pebs_master;
	/* use count; the context is freed when it drops to zero */
	unsigned long count;
	/* where this context is anchored: either in the thread_struct
	 * or in the per-cpu context array */
	struct ds_context **this;
	/* the task owning this context, or NULL for a cpu-owned context */
	struct task_struct *task;
};

/* Per-cpu contexts for cpu-wide (task == NULL) tracing. */
static DEFINE_PER_CPU(struct ds_context *, system_context_array);

#define system_context per_cpu(system_context_array, smp_processor_id())
M
Markus Metzger 已提交
234

235 236

static inline struct ds_context *ds_get_context(struct task_struct *task)
237
{
M
Markus Metzger 已提交
238
	struct ds_context **p_context =
239
		(task ? &task->thread.ds_ctx : &system_context);
240 241
	struct ds_context *context = NULL;
	struct ds_context *new_context = NULL;
242
	unsigned long irq;
M
Markus Metzger 已提交
243

244 245 246 247
	/* Chances are small that we already have a context. */
	new_context = kzalloc(sizeof(*new_context), GFP_KERNEL);
	if (!new_context)
		return NULL;
248

249
	spin_lock_irqsave(&ds_lock, irq);
M
Markus Metzger 已提交
250

251 252 253
	context = *p_context;
	if (!context) {
		context = new_context;
M
Markus Metzger 已提交
254

255 256 257
		context->this = p_context;
		context->task = task;
		context->count = 0;
M
Markus Metzger 已提交
258

259 260
		if (task)
			set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
261

262 263
		if (!task || (task == current))
			wrmsrl(MSR_IA32_DS_AREA, (unsigned long)context->ds);
264

265 266
		*p_context = context;
	}
267

268
	context->count++;
269

270
	spin_unlock_irqrestore(&ds_lock, irq);
M
Markus Metzger 已提交
271

272 273
	if (context != new_context)
		kfree(new_context);
M
Markus Metzger 已提交
274 275

	return context;
276
}
M
Markus Metzger 已提交
277 278

static inline void ds_put_context(struct ds_context *context)
279
{
280 281
	unsigned long irq;

M
Markus Metzger 已提交
282 283 284
	if (!context)
		return;

285
	spin_lock_irqsave(&ds_lock, irq);
M
Markus Metzger 已提交
286

287 288 289 290
	if (--context->count) {
		spin_unlock_irqrestore(&ds_lock, irq);
		return;
	}
M
Markus Metzger 已提交
291

292
	*(context->this) = NULL;
M
Markus Metzger 已提交
293 294 295 296 297 298 299

	if (context->task)
		clear_tsk_thread_flag(context->task, TIF_DS_AREA_MSR);

	if (!context->task || (context->task == current))
		wrmsrl(MSR_IA32_DS_AREA, 0);

300
	spin_unlock_irqrestore(&ds_lock, irq);
301 302

	kfree(context);
303
}
M
Markus Metzger 已提交
304 305 306


/*
307
 * Call the tracer's callback on a buffer overflow.
M
Markus Metzger 已提交
308 309 310 311
 *
 * context: the ds context
 * qual: the buffer type
 */
312 313 314
static void ds_overflow(struct ds_context *context, enum ds_qualifier qual)
{
	switch (qual) {
315 316 317 318 319 320 321 322 323
	case ds_bts:
		if (context->bts_master &&
		    context->bts_master->ovfl)
			context->bts_master->ovfl(context->bts_master);
		break;
	case ds_pebs:
		if (context->pebs_master &&
		    context->pebs_master->ovfl)
			context->pebs_master->ovfl(context->pebs_master);
324 325
		break;
	}
326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517
}


/*
 * Write raw data into the BTS or PEBS buffer.
 *
 * The remainder of any partially written record is zeroed out.
 *
 * context: the DS context
 * qual: the buffer type
 * record: the data to write
 * size: the size of the data
 */
static int ds_write(struct ds_context *context, enum ds_qualifier qual,
		    const void *record, size_t size)
{
	int bytes_written = 0;

	if (!record)
		return -EINVAL;

	while (size) {
		unsigned long base, index, end, write_end, int_th;
		unsigned long write_size, adj_write_size;

		/*
		 * write as much as possible without producing an
		 * overflow interrupt.
		 *
		 * interrupt_threshold must either be
		 * - bigger than absolute_maximum or
		 * - point to a record between buffer_base and absolute_maximum
		 *
		 * index points to a valid record.
		 */
		base   = ds_get(context->ds, qual, ds_buffer_base);
		index  = ds_get(context->ds, qual, ds_index);
		end    = ds_get(context->ds, qual, ds_absolute_maximum);
		int_th = ds_get(context->ds, qual, ds_interrupt_threshold);

		write_end = min(end, int_th);

		/* if we are already beyond the interrupt threshold,
		 * we fill the entire buffer */
		if (write_end <= index)
			write_end = end;

		/* the buffer is (still) full; give up */
		if (write_end <= index)
			break;

		write_size = min((unsigned long) size, write_end - index);
		memcpy((void *)index, record, write_size);

		record = (const char *)record + write_size;
		size -= write_size;
		bytes_written += write_size;

		/* advance by whole records only; a partially written
		 * record is padded with zeros below */
		adj_write_size = write_size / ds_cfg.sizeof_rec[qual];
		adj_write_size *= ds_cfg.sizeof_rec[qual];

		/* zero out trailing bytes */
		memset((char *)index + write_size, 0,
		       adj_write_size - write_size);
		index += adj_write_size;

		/* wrap around at the end of the buffer */
		if (index >= end)
			index = base;
		ds_set(context->ds, qual, ds_index, index);

		/* notify the tracer once we cross the threshold */
		if (index >= int_th)
			ds_overflow(context, qual);
	}

	return bytes_written;
}


/*
 * Branch Trace Store (BTS) uses the following format. Different
 * architectures vary in the size of those fields.
 * - source linear address
 * - destination linear address
 * - flags
 *
 * Later architectures use 64bit pointers throughout, whereas earlier
 * architectures use 32bit pointers in 32bit mode.
 *
 * We compute the base address for the first 8 fields based on:
 * - the field size stored in the DS configuration
 * - the relative field position
 *
 * In order to store additional information in the BTS buffer, we use
 * a special source address to indicate that the record requires
 * special interpretation.
 *
 * Netburst indicated via a bit in the flags field whether the branch
 * was predicted; this is ignored.
 *
 * We use two levels of abstraction:
 * - the raw data level defined here
 * - an arch-independent level defined in ds.h
 */

enum bts_field {
	/* layout of a regular branch record */
	bts_from,
	bts_to,
	bts_flags,

	/* layout of an escape (timestamp) record, reusing the same slots */
	bts_qual = bts_from,
	bts_jiffies = bts_to,
	bts_pid = bts_flags,

	/* mask selecting the qualifier bits within an escape record */
	bts_qual_mask = (bts_qual_max - 1),
	/* marker value identifying an escape record */
	bts_escape = ((unsigned long)-1 & ~bts_qual_mask)
};

/* Read one field of a raw BTS record. */
static inline unsigned long bts_get(const char *base, enum bts_field field)
{
	return *(unsigned long *)(base + (ds_cfg.sizeof_field * field));
}

/* Write one field of a raw BTS record. */
static inline void bts_set(char *base, enum bts_field field, unsigned long val)
{
	/* dropped a stray second semicolon after this statement */
	base += (ds_cfg.sizeof_field * field);
	(*(unsigned long *)base) = val;
}


/*
 * The raw BTS data is architecture dependent.
 *
 * For higher-level users, we give an arch-independent view.
 * - ds.h defines struct bts_struct
 * - bts_read translates one raw bts record into a bts_struct
 * - bts_write translates one bts_struct into the raw format and
 *   writes it into the top of the parameter tracer's buffer.
 *
 * return: bytes read/written on success; -Eerrno, otherwise
 */
static int bts_read(struct bts_tracer *tracer, const void *at,
		    struct bts_struct *out)
{
	if (!tracer)
		return -EINVAL;

	/* the record must lie entirely within the trace buffer */
	if (at < tracer->trace.ds.begin)
		return -EINVAL;

	if (tracer->trace.ds.end < (at + tracer->trace.ds.size))
		return -EINVAL;

	memset(out, 0, sizeof(*out));
	/* escape records carry a qualifier and timestamp instead of a
	 * branch; see enum bts_field */
	if ((bts_get(at, bts_qual) & ~bts_qual_mask) == bts_escape) {
		out->qualifier = (bts_get(at, bts_qual) & bts_qual_mask);
		out->variant.timestamp.jiffies = bts_get(at, bts_jiffies);
		out->variant.timestamp.pid = bts_get(at, bts_pid);
	} else {
		out->qualifier = bts_branch;
		out->variant.lbr.from = bts_get(at, bts_from);
		out->variant.lbr.to   = bts_get(at, bts_to);
	}

	return ds_cfg.sizeof_rec[ds_bts];
}

static int bts_write(struct bts_tracer *tracer, const struct bts_struct *in)
{
	unsigned char raw[MAX_SIZEOF_BTS];

	if (!tracer)
		return -EINVAL;

	if (MAX_SIZEOF_BTS < ds_cfg.sizeof_rec[ds_bts])
		return -EOVERFLOW;

	switch (in->qualifier) {
	case bts_invalid:
		bts_set(raw, bts_from, 0);
		bts_set(raw, bts_to, 0);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_branch:
		bts_set(raw, bts_from, in->variant.lbr.from);
		bts_set(raw, bts_to,   in->variant.lbr.to);
		bts_set(raw, bts_flags, 0);
		break;
	case bts_task_arrives:
	case bts_task_departs:
		bts_set(raw, bts_qual, (bts_escape | in->qualifier));
		bts_set(raw, bts_jiffies, in->variant.timestamp.jiffies);
		bts_set(raw, bts_pid, in->variant.timestamp.pid);
518
		break;
519 520
	default:
		return -EINVAL;
521
	}
522 523 524

	return ds_write(tracer->ds.context, ds_bts, raw,
			ds_cfg.sizeof_rec[ds_bts]);
525
}
M
Markus Metzger 已提交
526 527


528 529 530 531 532 533 534 535 536 537 538 539 540
/* Copy a ds_trace configuration into the hardware DS area. */
static void ds_write_config(struct ds_context *context,
			    struct ds_trace *cfg, enum ds_qualifier qual)
{
	unsigned char *area = context->ds;

	ds_set(area, qual, ds_buffer_base, (unsigned long)cfg->begin);
	ds_set(area, qual, ds_index, (unsigned long)cfg->top);
	ds_set(area, qual, ds_absolute_maximum, (unsigned long)cfg->end);
	ds_set(area, qual, ds_interrupt_threshold, (unsigned long)cfg->ith);
}

static void ds_read_config(struct ds_context *context,
			   struct ds_trace *cfg, enum ds_qualifier qual)
541
{
542 543 544 545 546 547 548 549 550 551 552
	unsigned char *ds = context->ds;

	cfg->begin = (void *)ds_get(ds, qual, ds_buffer_base);
	cfg->top = (void *)ds_get(ds, qual, ds_index);
	cfg->end = (void *)ds_get(ds, qual, ds_absolute_maximum);
	cfg->ith = (void *)ds_get(ds, qual, ds_interrupt_threshold);
}

/* Initialize a ds_trace for the given buffer and threshold. */
static void ds_init_ds_trace(struct ds_trace *trace, enum ds_qualifier qual,
			     void *base, size_t size, size_t ith,
			     unsigned int flags)
{
	unsigned long buffer, adj;

	/* adjust the buffer address and size to meet alignment
	 * constraints:
	 * - buffer is double-word aligned
	 * - size is multiple of record size
	 *
	 * The caller checked the size; there is enough space for the
	 * adjustment.
	 */
	buffer = (unsigned long)base;

	adj = ALIGN(buffer, DS_ALIGNMENT) - buffer;
	buffer += adj;
	size   -= adj;

	trace->n = size / ds_cfg.sizeof_rec[qual];
	trace->size = ds_cfg.sizeof_rec[qual];

	/* truncate to a whole number of records */
	size = (trace->n * trace->size);

	trace->begin = (void *)buffer;
	trace->top = trace->begin;
	trace->end = (void *)(buffer + size);
	/* The value for 'no threshold' is -1, which sets the threshold
	 * outside of the buffer, just like we want it. */
	trace->ith = (void *)(buffer + size - ith);

	trace->flags = flags;
}

585 586 587 588

/*
 * Validate the request, attach a DS context, and initialize the trace;
 * common part of ds_request_bts() and ds_request_pebs().
 *
 * Returns 0 on success; a negative error code otherwise.
 */
static int ds_request(struct ds_tracer *tracer, struct ds_trace *trace,
		      enum ds_qualifier qual, struct task_struct *task,
		      void *base, size_t size, size_t th, unsigned int flags)
{
	struct ds_context *context;
	int error;

	error = -EINVAL;
	if (!base)
		goto out;

	/* we require some space to do alignment adjustments below */
	error = -EINVAL;
	if (size < (DS_ALIGNMENT + ds_cfg.sizeof_rec[qual]))
		goto out;

	if (th != (size_t)-1) {
		/* convert the threshold from records to bytes */
		th *= ds_cfg.sizeof_rec[qual];

		error = -EINVAL;
		if (size <= th)
			goto out;
	}

	tracer->buffer = base;
	tracer->size = size;

	error = -ENOMEM;
	context = ds_get_context(task);
	if (!context)
		goto out;
	tracer->context = context;

	ds_init_ds_trace(trace, qual, base, size, th, flags);

	error = 0;
 out:
	return error;
}
M
Markus Metzger 已提交
625

626 627
struct bts_tracer *ds_request_bts(struct task_struct *task,
				  void *base, size_t size,
628 629
				  bts_ovfl_callback_t ovfl, size_t th,
				  unsigned int flags)
630
{
631
	struct bts_tracer *tracer;
632
	unsigned long irq;
633
	int error;
M
Markus Metzger 已提交
634

635 636 637 638
	error = -EOPNOTSUPP;
	if (!ds_cfg.ctl[dsf_bts])
		goto out;

639 640 641 642 643 644 645 646 647 648 649
	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
		goto out;

	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;

650 651
	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_bts, task, base, size, th, flags);
652 653 654
	if (error < 0)
		goto out_tracer;

655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676

	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->bts_master)
		goto out_put_tracer;
	tracer->ds.context->bts_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);


	tracer->trace.read  = bts_read;
	tracer->trace.write = bts_write;

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	ds_resume_bts(tracer);

677 678
	return tracer;

679 680 681 682 683
 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
684
 out_tracer:
685
	kfree(tracer);
686 687
 out:
	return ERR_PTR(error);
688
}
M
Markus Metzger 已提交
689

690 691
struct pebs_tracer *ds_request_pebs(struct task_struct *task,
				    void *base, size_t size,
692 693
				    pebs_ovfl_callback_t ovfl, size_t th,
				    unsigned int flags)
694
{
695
	struct pebs_tracer *tracer;
696
	unsigned long irq;
M
Markus Metzger 已提交
697 698
	int error;

699 700 701
	/* buffer overflow notification is not yet implemented */
	error = -EOPNOTSUPP;
	if (ovfl)
M
Markus Metzger 已提交
702 703
		goto out;

704 705 706 707 708
	error = -ENOMEM;
	tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
	if (!tracer)
		goto out;
	tracer->ovfl = ovfl;
M
Markus Metzger 已提交
709

710 711
	error = ds_request(&tracer->ds, &tracer->trace.ds,
			   ds_pebs, task, base, size, th, flags);
712 713
	if (error < 0)
		goto out_tracer;
M
Markus Metzger 已提交
714

715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731
	spin_lock_irqsave(&ds_lock, irq);

	error = -EPERM;
	if (!check_tracer(task))
		goto out_unlock;
	get_tracer(task);

	error = -EPERM;
	if (tracer->ds.context->pebs_master)
		goto out_put_tracer;
	tracer->ds.context->pebs_master = tracer;

	spin_unlock_irqrestore(&ds_lock, irq);

	ds_write_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	ds_resume_pebs(tracer);

732 733
	return tracer;

734 735 736 737 738
 out_put_tracer:
	put_tracer(task);
 out_unlock:
	spin_unlock_irqrestore(&ds_lock, irq);
	ds_put_context(tracer->ds.context);
739
 out_tracer:
740
	kfree(tracer);
M
Markus Metzger 已提交
741
 out:
742 743 744
	return ERR_PTR(error);
}

745
void ds_release_bts(struct bts_tracer *tracer)
746
{
747
	if (!tracer)
748
		return;
749

750
	ds_suspend_bts(tracer);
751

752 753
	WARN_ON_ONCE(tracer->ds.context->bts_master != tracer);
	tracer->ds.context->bts_master = NULL;
M
Markus Metzger 已提交
754

755 756
	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);
757 758

	kfree(tracer);
759
}
M
Markus Metzger 已提交
760

761
void ds_suspend_bts(struct bts_tracer *tracer)
762
{
763
	struct task_struct *task;
764 765

	if (!tracer)
766
		return;
767

768
	task = tracer->ds.context->task;
769

770 771
	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() & ~BTS_CONTROL);
772

773 774
	if (task) {
		task->thread.debugctlmsr &= ~BTS_CONTROL;
775

776 777 778
		if (!task->thread.debugctlmsr)
			clear_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}
M
Markus Metzger 已提交
779
}
780

781
void ds_resume_bts(struct bts_tracer *tracer)
M
Markus Metzger 已提交
782
{
783 784
	struct task_struct *task;
	unsigned long control;
785

786
	if (!tracer)
787
		return;
788

789
	task = tracer->ds.context->task;
790

791 792 793 794 795
	control = ds_cfg.ctl[dsf_bts];
	if (!(tracer->trace.ds.flags & BTS_KERNEL))
		control |= ds_cfg.ctl[dsf_bts_kernel];
	if (!(tracer->trace.ds.flags & BTS_USER))
		control |= ds_cfg.ctl[dsf_bts_user];
796

797 798 799 800
	if (task) {
		task->thread.debugctlmsr |= control;
		set_tsk_thread_flag(task, TIF_DEBUGCTLMSR);
	}
801

802 803
	if (!task || (task == current))
		update_debugctlmsr(get_debugctlmsr() | control);
804 805
}

806
void ds_release_pebs(struct pebs_tracer *tracer)
807
{
808
	if (!tracer)
809
		return;
M
Markus Metzger 已提交
810

811
	ds_suspend_pebs(tracer);
M
Markus Metzger 已提交
812

813 814
	WARN_ON_ONCE(tracer->ds.context->pebs_master != tracer);
	tracer->ds.context->pebs_master = NULL;
815

816 817
	put_tracer(tracer->ds.context->task);
	ds_put_context(tracer->ds.context);
818

819
	kfree(tracer);
M
Markus Metzger 已提交
820 821
}

822
void ds_suspend_pebs(struct pebs_tracer *tracer)
{
	/* not yet implemented */
}
826

827
void ds_resume_pebs(struct pebs_tracer *tracer)
{
	/* not yet implemented */
}

832
const struct bts_trace *ds_read_bts(struct bts_tracer *tracer)
833
{
834
	if (!tracer)
835
		return NULL;
836

837 838
	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_bts);
	return &tracer->trace;
M
Markus Metzger 已提交
839
}
840

841
const struct pebs_trace *ds_read_pebs(struct pebs_tracer *tracer)
M
Markus Metzger 已提交
842
{
843
	if (!tracer)
844
		return NULL;
845

846 847 848
	ds_read_config(tracer->ds.context, &tracer->trace.ds, ds_pebs);
	tracer->trace.reset_value =
		*(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8));
849

850
	return &tracer->trace;
M
Markus Metzger 已提交
851
}
852

853
int ds_reset_bts(struct bts_tracer *tracer)
M
Markus Metzger 已提交
854
{
855 856 857
	if (!tracer)
		return -EINVAL;

858
	tracer->trace.ds.top = tracer->trace.ds.begin;
859

860 861
	ds_set(tracer->ds.context->ds, ds_bts, ds_index,
	       (unsigned long)tracer->trace.ds.top);
862 863

	return 0;
M
Markus Metzger 已提交
864
}
865

866
int ds_reset_pebs(struct pebs_tracer *tracer)
M
Markus Metzger 已提交
867
{
868 869
	if (!tracer)
		return -EINVAL;
870

871
	tracer->trace.ds.top = tracer->trace.ds.begin;
872

873 874
	ds_set(tracer->ds.context->ds, ds_bts, ds_index,
	       (unsigned long)tracer->trace.ds.top);
M
Markus Metzger 已提交
875

876
	return 0;
877 878
}

879
/* Set the value to which the PEBS counter is reset on overflow. */
int ds_set_pebs_reset(struct pebs_tracer *tracer, u64 value)
{
	u64 *reset_field;

	if (!tracer)
		return -EINVAL;

	/* the reset value is the 9th pointer-sized field in the DS area */
	reset_field =
		(u64 *)(tracer->ds.context->ds + (ds_cfg.sizeof_field * 8));
	*reset_field = value;

	return 0;
}

889 890 891 892 893 894 895 896
static const struct ds_configuration ds_cfg_netburst = {
	.name = "netburst",
	.ctl[dsf_bts]		= (1 << 2) | (1 << 3),
	.ctl[dsf_bts_kernel]	= (1 << 5),
	.ctl[dsf_bts_user]	= (1 << 6),

	.sizeof_field		= sizeof(long),
	.sizeof_rec[ds_bts]	= sizeof(long) * 3,
897
#ifdef __i386__
898
	.sizeof_rec[ds_pebs]	= sizeof(long) * 10,
899
#else
900
	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
901
#endif
902
};
903 904 905 906 907 908
static const struct ds_configuration ds_cfg_pentium_m = {
	.name = "pentium m",
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),

	.sizeof_field		= sizeof(long),
	.sizeof_rec[ds_bts]	= sizeof(long) * 3,
909
#ifdef __i386__
910
	.sizeof_rec[ds_pebs]	= sizeof(long) * 10,
911
#else
912
	.sizeof_rec[ds_pebs]	= sizeof(long) * 18,
913
#endif
914
};
915 916 917 918 919 920 921 922 923 924
static const struct ds_configuration ds_cfg_core2 = {
	.name = "core 2",
	.ctl[dsf_bts]		= (1 << 6) | (1 << 7),
	.ctl[dsf_bts_kernel]	= (1 << 9),
	.ctl[dsf_bts_user]	= (1 << 10),

	.sizeof_field		= 8,
	.sizeof_rec[ds_bts]	= 8 * 3,
	.sizeof_rec[ds_pebs]	= 8 * 18,
};
925

926
static void
927 928
ds_configure(const struct ds_configuration *cfg)
{
929
	memset(&ds_cfg, 0, sizeof(ds_cfg));
930
	ds_cfg = *cfg;
931

932 933 934 935 936 937 938 939
	printk(KERN_INFO "[ds] using %s configuration\n", ds_cfg.name);

	if (!cpu_has_bts) {
		ds_cfg.ctl[dsf_bts] = 0;
		printk(KERN_INFO "[ds] bts not available\n");
	}
	if (!cpu_has_pebs)
		printk(KERN_INFO "[ds] pebs not available\n");
940

941
	WARN_ON_ONCE(MAX_SIZEOF_DS < (12 * ds_cfg.sizeof_field));
942 943 944 945 946 947 948
}

/* Select the DS configuration matching the Intel cpu family/model. */
void __cpuinit ds_init_intel(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 0x6:
		switch (c->x86_model) {
		case 0 ... 0xC:
			/* sorry, don't know about them */
			break;
		case 0xD:
		case 0xE: /* Pentium M */
			ds_configure(&ds_cfg_pentium_m);
			break;
		default: /* Core2, Atom, ... */
			ds_configure(&ds_cfg_core2);
			break;
		}
		break;
	case 0xF:
		switch (c->x86_model) {
		case 0x0:
		case 0x1:
		case 0x2: /* Netburst */
			ds_configure(&ds_cfg_netburst);
			break;
		default:
			/* sorry, don't know about them */
			break;
		}
		break;
	default:
		/* sorry, don't know about them */
		break;
	}
}
M
Markus Metzger 已提交
978

979 980 981 982
/*
 * Change the DS configuration from tracing prev to tracing next.
 */
void ds_switch_to(struct task_struct *prev, struct task_struct *next)
M
Markus Metzger 已提交
983
{
984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012
	struct ds_context *prev_ctx = prev->thread.ds_ctx;
	struct ds_context *next_ctx = next->thread.ds_ctx;

	if (prev_ctx) {
		update_debugctlmsr(0);

		if (prev_ctx->bts_master &&
		    (prev_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_departs,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = prev->pid
			};
			bts_write(prev_ctx->bts_master, &ts);
		}
	}

	if (next_ctx) {
		if (next_ctx->bts_master &&
		    (next_ctx->bts_master->trace.ds.flags & BTS_TIMESTAMPS)) {
			struct bts_struct ts = {
				.qualifier = bts_task_arrives,
				.variant.timestamp.jiffies = jiffies_64,
				.variant.timestamp.pid = next->pid
			};
			bts_write(next_ctx->bts_master, &ts);
		}

		wrmsrl(MSR_IA32_DS_AREA, (unsigned long)next_ctx->ds);
1013
	}
1014 1015

	update_debugctlmsr(next->thread.debugctlmsr);
M
Markus Metzger 已提交
1016
}