// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
#include "coresight-priv.h"

static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}

static ssize_t nr_pe_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_pe_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_pe_cmp);

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ext_inp_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ext_inp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ext_inp);

static ssize_t numcidc_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numcidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numcidc);

static ssize_t numvmidc_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numvmidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numvmidc);

static ssize_t nrseqstate_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nrseqstate;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nrseqstate);

static ssize_t nr_resource_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_resource;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_resource);

static ssize_t nr_ss_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ss_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ss_cmp);

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			   ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp > 0) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

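	/* Disable the counters and clear their values */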
	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

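	/* Disable the resource selection controls */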
	config->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

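	/* Disable the single-shot comparator controls */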
	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

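	/* Disable the address comparators */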
	config->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

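	/* Disable the context ID comparators */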
	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++) {
		config->ctxid_pid[i] = 0x0;
		config->ctxid_vpid[i] = 0x0;
	}

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

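	/* Disable the VMID comparators */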
	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

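	/* Reset the trace ID to its default value (CPU number + 1) */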
	drvdata->trcid = drvdata->cpu + 1;

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(reset);

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->mode;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg  &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg  |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg  |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg  |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
		(drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

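	/* bit[7], Virtual context identifier tracing bit */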
	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
					(drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
		(drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
		(drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

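	/* Configure kernel/user-space exclusion via the exception level filters */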
	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);

static ssize_t pe_show(struct device *dev,
		       struct device_attribute *attr,
		       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->pe_sel;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->eventctrl0;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		config->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		 /* EVENT1, bits[15:8] */
		config->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		config->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		config->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);

static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		config->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		config->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);

static ssize_t event_ts_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ts_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	config->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);

static ssize_t syncfreq_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->syncfreq;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t syncfreq_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->syncpr == true)
		return -EINVAL;

	config->syncfreq = val & ETMv4_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(syncfreq);

static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val < drvdata->ccitmin)
		return -EINVAL;

	config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);

static ssize_t bb_ctrl_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->bb_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t bb_ctrl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->trcbb == false)
		return -EINVAL;
	if (!drvdata->nr_addr_cmp)
		return -EINVAL;
	/*
	 * Bit[7:0] selects which address range comparator is used for
	 * branch broadcast control.
	 */
	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
		return -EINVAL;

	config->bb_ctrl = val;
	return size;
}
static DEVICE_ATTR_RW(bb_ctrl);

static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	config->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);

static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->vinst_ctrl, 16, 19);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
	config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	config->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);

static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* EXLEVEL_NS, bits[23:20] */
	val = BMVAL(config->vinst_ctrl, 20, 23);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);

static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
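	/* any other type string leaves the comparator type unchanged */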

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	       config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);

static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID comparison */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID comparison */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs both a Context ID
		 * comparison and a VMID comparison
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);

static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
		     drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);

static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);

static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* RST, bits[7:0] */
	config->seq_ctrl[idx] = val & 0xFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);

static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);

static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For an odd idx, the pair inversion bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
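	/* report back the virtual PID, as originally written by the user */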
	val = (unsigned long)config->ctxid_vpid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	config->ctxid_vpid[idx] = (u64)vpid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);

static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->ctxid_mask0;
	val2 = config->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		 /* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the ctxid comparator value to 0x0, otherwise the behavior is
	 * unpredictable. For example, if bit[3] of ctxid_mask0 is 1, we must
	 * clear bits[31:24] (byte 3) of the ctxid comparator 0 value register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (unsigned long)config->vmid_val[config->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
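
/*
 * Illustrative usage of the vmid_idx/vmid_val pair (hex input; the sysfs
 * path is platform dependent): select VMID comparator 1, then program it:
 *
 *   echo 0x1 > /sys/bus/coresight/devices/<etm>/vmid_idx
 *   echo 0xbeef > /sys/bus/coresight/devices/<etm>/vmid_val
 */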

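/*
 * The vmid mask handling below mirrors ctxid_masks above: each byte of
 * vmid_mask0/vmid_mask1 selects a VMID comparator and each bit within
 * that byte masks one byte of the comparator's value register.
 */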
static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and at least 8 bit vmid size
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the vmid comparator value to 0x0, otherwise behavior is
	 * unpredictable. For example, if bit[3] of vmid_mask0 is 1, we must
	 * clear bits[31:24] (byte 3) of the vmid comparator0 value register.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

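/* Per-device configuration attributes, exposed at the top level in sysfs */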
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};

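/* Address/value pair handed to the cross-CPU register read helper below */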
struct etmv4_reg {
	void __iomem *addr;
	u32 data;
};

static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}

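/*
 * coresight_etm4x_reg() defines a read-only sysfs attribute that reads the
 * register directly, while coresight_etm4x_cross_read() routes the access
 * through etmv4_cross_read() so it executes on the CPU owning this ETM and
 * the trace unit is powered when the register is read.
 */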
#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);

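/* Management registers, exposed under the "mgmt" sysfs group */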
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};

coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);

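/* ID registers describing the ETM implementation, exposed under "trcidr" */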
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};

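/*
 * Three attribute groups: the unnamed configuration group (attributes at
 * the top level of the device directory) plus the "mgmt" and "trcidr"
 * subdirectories built from the lists above.
 */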
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};