/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/pm_runtime.h>
#include <linux/sysfs.h>
#include "coresight-etm4x.h"
#include "coresight-priv.h"

static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
{
	u8 idx;
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;

	/*
	 * TRCACATRn.TYPE bit[1:0]: type of comparison
	 * the trace unit performs
	 */
	if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
		if (idx % 2 != 0)
			return -EINVAL;

		/*
		 * We are performing instruction address comparison. Set the
		 * relevant bit of ViewInst Include/Exclude Control register
		 * for corresponding address comparator pair.
		 */
		if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
		    config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
			return -EINVAL;

		if (exclude == true) {
			/*
			 * Set exclude bit and unset the include bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2 + 16);
			config->viiectlr &= ~BIT(idx / 2);
		} else {
			/*
			 * Set include bit and unset exclude bit
			 * corresponding to comparator pair
			 */
			config->viiectlr |= BIT(idx / 2);
			config->viiectlr &= ~BIT(idx / 2 + 16);
		}
	}
	return 0;
}

static ssize_t nr_pe_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_pe_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_pe_cmp);

static ssize_t nr_addr_cmp_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_addr_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_addr_cmp);

static ssize_t nr_cntr_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_cntr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_cntr);

static ssize_t nr_ext_inp_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ext_inp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ext_inp);

static ssize_t numcidc_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numcidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numcidc);

static ssize_t numvmidc_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->numvmidc;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(numvmidc);

static ssize_t nrseqstate_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nrseqstate;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nrseqstate);

static ssize_t nr_resource_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_resource;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_resource);

static ssize_t nr_ss_cmp_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->nr_ss_cmp;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(nr_ss_cmp);
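
/*
 * The read-only attributes above simply report the resource counts cached in
 * drvdata.  A minimal usage sketch from userspace (the device name under
 * /sys/bus/coresight/devices/ is platform dependent; "etm0" is only an
 * illustrative example):
 *
 *   cat /sys/bus/coresight/devices/etm0/nr_addr_cmp
 */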

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	int i;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val)
		config->mode = 0x0;

	/* Disable data tracing: do not trace load and store data transfers */
	config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
	config->cfg &= ~(BIT(1) | BIT(2));

	/* Disable data value and data address tracing */
	config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
			   ETM_MODE_DATA_TRACE_VAL);
	config->cfg &= ~(BIT(16) | BIT(17));

	/* Disable all events tracing */
	config->eventctrl0 = 0x0;
	config->eventctrl1 = 0x0;

	/* Disable timestamp event */
	config->ts_ctrl = 0x0;

	/* Disable stalling */
	config->stall_ctrl = 0x0;

	/* Reset trace synchronization period to 2^8 = 256 bytes */
	if (drvdata->syncpr == false)
		config->syncfreq = 0x8;

	/*
	 * Enable ViewInst to trace everything with start-stop logic in
	 * started state. ARM recommends start-stop logic is set before
	 * each trace run.
	 */
	config->vinst_ctrl |= BIT(0);
	if (drvdata->nr_addr_cmp == true) {
		config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
		/* SSSTATUS, bit[9] */
		config->vinst_ctrl |= BIT(9);
	}

	/* No address range filtering for ViewInst */
	config->viiectlr = 0x0;

	/* No start-stop filtering for ViewInst */
	config->vissctlr = 0x0;

	/* Disable seq events */
	for (i = 0; i < drvdata->nrseqstate-1; i++)
		config->seq_ctrl[i] = 0x0;
	config->seq_rst = 0x0;
	config->seq_state = 0x0;

	/* Disable external input events */
	config->ext_inp = 0x0;

	config->cntr_idx = 0x0;
	for (i = 0; i < drvdata->nr_cntr; i++) {
		config->cntrldvr[i] = 0x0;
		config->cntr_ctrl[i] = 0x0;
		config->cntr_val[i] = 0x0;
	}

	config->res_idx = 0x0;
	for (i = 0; i < drvdata->nr_resource; i++)
		config->res_ctrl[i] = 0x0;

	for (i = 0; i < drvdata->nr_ss_cmp; i++) {
		config->ss_ctrl[i] = 0x0;
		config->ss_pe_cmp[i] = 0x0;
	}

	config->addr_idx = 0x0;
	for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
		config->addr_val[i] = 0x0;
		config->addr_acc[i] = 0x0;
		config->addr_type[i] = ETM_ADDR_TYPE_NONE;
	}

	config->ctxid_idx = 0x0;
	for (i = 0; i < drvdata->numcidc; i++) {
		config->ctxid_pid[i] = 0x0;
		config->ctxid_vpid[i] = 0x0;
	}

	config->ctxid_mask0 = 0x0;
	config->ctxid_mask1 = 0x0;

	config->vmid_idx = 0x0;
	for (i = 0; i < drvdata->numvmidc; i++)
		config->vmid_val[i] = 0x0;
	config->vmid_mask0 = 0x0;
	config->vmid_mask1 = 0x0;

	drvdata->trcid = drvdata->cpu + 1;

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_WO(reset);
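
/*
 * Usage sketch for the write-only "reset" attribute (the sysfs path and
 * device name are assumptions, they depend on the platform):
 *
 *   echo 1 > /sys/bus/coresight/devices/etm0/reset
 *
 * The handler above rebuilds the default configuration for any write; a
 * non-zero value additionally clears config->mode first.
 */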

static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->mode;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t mode_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t size)
{
	unsigned long val, mode;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->mode = val & ETMv4_MODE_ALL;

	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	if (drvdata->instrp0 == true) {
		/* start by clearing instruction P0 field */
		config->cfg  &= ~(BIT(1) | BIT(2));
		if (config->mode & ETM_MODE_LOAD)
			/* 0b01 Trace load instructions as P0 instructions */
			config->cfg  |= BIT(1);
		if (config->mode & ETM_MODE_STORE)
			/* 0b10 Trace store instructions as P0 instructions */
			config->cfg  |= BIT(2);
		if (config->mode & ETM_MODE_LOAD_STORE)
			/*
			 * 0b11 Trace load and store instructions
			 * as P0 instructions
			 */
			config->cfg  |= BIT(1) | BIT(2);
	}

	/* bit[3], Branch broadcast mode */
	if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
		config->cfg |= BIT(3);
	else
		config->cfg &= ~BIT(3);

	/* bit[4], Cycle counting instruction trace bit */
	if ((config->mode & ETMv4_MODE_CYCACC) &&
		(drvdata->trccci == true))
		config->cfg |= BIT(4);
	else
		config->cfg &= ~BIT(4);

	/* bit[6], Context ID tracing bit */
	if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
		config->cfg |= BIT(6);
	else
		config->cfg &= ~BIT(6);

	if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
		config->cfg |= BIT(7);
	else
		config->cfg &= ~BIT(7);

	/* bits[10:8], Conditional instruction tracing bit */
	mode = ETM_MODE_COND(config->mode);
	if (drvdata->trccond == true) {
		config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
		config->cfg |= mode << 8;
	}

	/* bit[11], Global timestamp tracing bit */
	if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
		config->cfg |= BIT(11);
	else
		config->cfg &= ~BIT(11);

	/* bit[12], Return stack enable bit */
	if ((config->mode & ETM_MODE_RETURNSTACK) &&
					(drvdata->retstack == true))
		config->cfg |= BIT(12);
	else
		config->cfg &= ~BIT(12);

	/* bits[14:13], Q element enable field */
	mode = ETM_MODE_QELEM(config->mode);
	/* start by clearing QE bits */
	config->cfg &= ~(BIT(13) | BIT(14));
	/* if supported, Q elements with instruction counts are enabled */
	if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
		config->cfg |= BIT(13);
	/*
	 * if supported, Q elements with and without instruction
	 * counts are enabled
	 */
	if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
		config->cfg |= BIT(14);

	/* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
	if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
	    (drvdata->atbtrig == true))
		config->eventctrl1 |= BIT(11);
	else
		config->eventctrl1 &= ~BIT(11);

	/* bit[12], Low-power state behavior override bit */
	if ((config->mode & ETM_MODE_LPOVERRIDE) &&
	    (drvdata->lpoverride == true))
		config->eventctrl1 |= BIT(12);
	else
		config->eventctrl1 &= ~BIT(12);

	/* bit[8], Instruction stall bit */
	if (config->mode & ETM_MODE_ISTALL_EN)
		config->stall_ctrl |= BIT(8);
	else
		config->stall_ctrl &= ~BIT(8);

	/* bit[10], Prioritize instruction trace bit */
	if (config->mode & ETM_MODE_INSTPRIO)
		config->stall_ctrl |= BIT(10);
	else
		config->stall_ctrl &= ~BIT(10);

	/* bit[13], Trace overflow prevention bit */
	if ((config->mode & ETM_MODE_NOOVERFLOW) &&
		(drvdata->nooverflow == true))
		config->stall_ctrl |= BIT(13);
	else
		config->stall_ctrl &= ~BIT(13);

	/* bit[9] Start/stop logic control bit */
	if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
		config->vinst_ctrl |= BIT(9);
	else
		config->vinst_ctrl &= ~BIT(9);

	/* bit[10], Whether a trace unit must trace a Reset exception */
	if (config->mode & ETM_MODE_TRACE_RESET)
		config->vinst_ctrl |= BIT(10);
	else
		config->vinst_ctrl &= ~BIT(10);

	/* bit[11], Whether a trace unit must trace a system error exception */
	if ((config->mode & ETM_MODE_TRACE_ERR) &&
		(drvdata->trc_error == true))
		config->vinst_ctrl |= BIT(11);
	else
		config->vinst_ctrl &= ~BIT(11);

	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
		etm4_config_trace_mode(config);

	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(mode);
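
/*
 * "mode" takes a hex bitmask of the ETM_MODE_*/ETMv4_MODE_* flags used above;
 * the handler then derives the cfg, eventctrl1, stall_ctrl and vinst_ctrl
 * bits from it.  Sketch (flag value and device name are illustrative only):
 *
 *   echo 0x10 > /sys/bus/coresight/devices/etm0/mode
 *   cat /sys/bus/coresight/devices/etm0/mode
 */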

static ssize_t pe_show(struct device *dev,
		       struct device_attribute *attr,
		       char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->pe_sel;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t pe_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	if (val > drvdata->nr_pe) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}

	config->pe_sel = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(pe);

static ssize_t event_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->eventctrl0;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	switch (drvdata->nr_event) {
	case 0x0:
		/* EVENT0, bits[7:0] */
		config->eventctrl0 = val & 0xFF;
		break;
	case 0x1:
		/* EVENT1, bits[15:8] */
		config->eventctrl0 = val & 0xFFFF;
		break;
	case 0x2:
		/* EVENT2, bits[23:16] */
		config->eventctrl0 = val & 0xFFFFFF;
		break;
	case 0x3:
		/* EVENT3, bits[31:24] */
		config->eventctrl0 = val;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event);
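
/*
 * "event" packs up to four 8-bit event selectors into eventctrl0; how many
 * bytes of the written value are kept depends on drvdata->nr_event, as the
 * switch above shows.  Illustrative write (value and device name assumed):
 *
 *   echo 0x101 > /sys/bus/coresight/devices/etm0/event
 */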

static ssize_t event_instren_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->eventctrl1, 0, 3);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_instren_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* start by clearing all instruction event enable bits */
	config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
	switch (drvdata->nr_event) {
	case 0x0:
		/* generate Event element for event 1 */
		config->eventctrl1 |= val & BIT(1);
		break;
	case 0x1:
		/* generate Event element for event 1 and 2 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1));
		break;
	case 0x2:
		/* generate Event element for event 1, 2 and 3 */
		config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
		break;
	case 0x3:
		/* generate Event element for all 4 events */
		config->eventctrl1 |= val & 0xF;
		break;
	default:
		break;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_instren);

static ssize_t event_ts_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ts_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_ts_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!drvdata->ts_size)
		return -EINVAL;

	config->ts_ctrl = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(event_ts);

static ssize_t syncfreq_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->syncfreq;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t syncfreq_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->syncpr == true)
		return -EINVAL;

	config->syncfreq = val & ETMv4_SYNC_MASK;
	return size;
}
static DEVICE_ATTR_RW(syncfreq);
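
/*
 * "syncfreq" holds the requested trace synchronization period (masked with
 * ETMv4_SYNC_MASK); reset_store() above programs 0x8 for a 2^8 = 256 byte
 * period, and writes are rejected when drvdata->syncpr is set.  Sketch
 * (device name assumed):
 *
 *   echo 0x8 > /sys/bus/coresight/devices/etm0/syncfreq
 */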

static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val < drvdata->ccitmin)
		return -EINVAL;

	config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);

static ssize_t bb_ctrl_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->bb_ctrl;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t bb_ctrl_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (drvdata->trcbb == false)
		return -EINVAL;
	if (!drvdata->nr_addr_cmp)
		return -EINVAL;
	/*
	 * Bit[7:0] selects which address range comparator is used for
	 * branch broadcast control.
	 */
	if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
		return -EINVAL;

	config->bb_ctrl = val;
	return size;
}
static DEVICE_ATTR_RW(bb_ctrl);
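
/*
 * "bb_ctrl" configures branch broadcasting; per the checks above it is only
 * accepted when the tracer supports it and bits[7:0] do not select a higher
 * address range comparator than the hardware implements.  Sketch (value and
 * device name are illustrative):
 *
 *   echo 0x1 > /sys/bus/coresight/devices/etm0/bb_ctrl
 */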

static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	config->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);

static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->vinst_ctrl, 16, 19);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
	config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	config->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);

static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* EXLEVEL_NS, bits[23:20] */
	val = BMVAL(config->vinst_ctrl, 20, 23);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
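
/*
 * "s_exlevel_vinst" and "ns_exlevel_vinst" write the Secure (bits[19:16])
 * and Non-secure (bits[23:20]) EXLEVEL fields of vinst_ctrl; bits that are
 * not advertised in drvdata->s_ex_level / ns_ex_level are silently dropped.
 * Sketch (mask value and device name are illustrative):
 *
 *   echo 0x4 > /sys/bus/coresight/devices/etm0/ns_exlevel_vinst
 */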

static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);
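
/*
 * "addr_idx" selects which address comparator the other addr_* attributes
 * operate on; valid values range from 0 to (nr_addr_cmp * 2 - 1).  Sketch
 * (device name assumed):
 *
 *   echo 0 > /sys/bus/coresight/devices/etm0/addr_idx
 */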

static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);

static ssize_t addr_single_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	idx = config->addr_idx;
	spin_lock(&drvdata->spinlock);
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_single_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_single);

static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
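
/*
 * "addr_range" expects two hex addresses ("start end") and only works on an
 * even addr_idx, since a range occupies a comparator pair.  Illustrative
 * write (addresses and device name are made up):
 *
 *   echo '0x8000 0x9000' > /sys/bus/coresight/devices/etm0/addr_range
 */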

static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	config->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);

static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	config->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
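
/*
 * "addr_start"/"addr_stop" program the ViewInst start/stop logic: a write
 * stores the address in the selected comparator, flags it in vissctlr and
 * turns on SSSTATUS (vinst_ctrl bit[9]).  Each address needs its own
 * comparator, so a sketch would be (names and addresses illustrative):
 *
 *   cd /sys/bus/coresight/devices/etm0
 *   echo 0 > addr_idx; echo 0x8000 > addr_start
 *   echo 1 > addr_idx; echo 0x9000 > addr_stop
 */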

static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID comparison */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID comparison */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID comparison
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
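
/*
 * "addr_ctxtype" takes one of the strings "none", "ctxid", "vmid" or "all"
 * and sets CONTEXTTYPE bits[3:2] of the selected comparator accordingly,
 * provided the matching comparators (numcidc/numvmidc) exist.  Sketch
 * (device name assumed):
 *
 *   echo ctxid > /sys/bus/coresight/devices/etm0/addr_ctxtype
 */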

static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	if (val >=  (drvdata->numcidc >= drvdata->numvmidc ?
		     drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);

static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);

static ssize_t seq_state_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_state;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_state_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nrseqstate)
		return -EINVAL;

	config->seq_state = val;
	return size;
}
static DEVICE_ATTR_RW(seq_state);

static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* RST, bits[7:0] */
	config->seq_ctrl[idx] = val & 0xFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);

static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);

static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);

static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);

static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);

static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);

static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);
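
/*
 * "res_idx" picks the resource selector that "res_ctrl" then reads or
 * programs; selector pair 0 is reserved, so the first writable index is 1.
 * Sketch (device name assumed):
 *
 *   echo 2 > /sys/bus/coresight/devices/etm0/res_idx
 *   cat /sys/bus/coresight/devices/etm0/res_ctrl
 */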

static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For an odd idx the pair inversion bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);

static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);

static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	val = (unsigned long)config->ctxid_vpid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;

	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	config->ctxid_vpid[idx] = (u64)vpid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
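
/*
 * "ctxid_pid" is parsed as a hex PID; the handler keeps the value written as
 * the vpid and stores the result of coresight_vpid_to_pid() as the pid the
 * comparator should match.  Sketch (the hex conversion of the shell PID and
 * the device name are illustrative only):
 *
 *   printf '%x' $$ > /sys/bus/coresight/devices/etm0/ctxid_pid
 */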

static ssize_t ctxid_masks_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->ctxid_mask0;
	val2 = config->ctxid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t ctxid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/*
	 * each byte[0..3] controls mask value applied to ctxid
	 * comparator[0..3]
	 */
	switch (drvdata->numcidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->ctxid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->ctxid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->ctxid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->ctxid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->ctxid_mask0 = val1;
		config->ctxid_mask1 = val2;
		break;
	default:
		break;
	}
	/*
	 * If software sets a mask bit to 1, it must program relevant byte
	 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
	 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
	 * of ctxid comparator0 value (corresponding to byte 0) register.
	 */
	mask = config->ctxid_mask0;
	for (i = 0; i < drvdata->numcidc; i++) {
		/* mask value of corresponding ctxid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective ctxid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->ctxid_pid[i] &= ~(0xFF << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next ctxid comparator mask value */
		if (i == 3)
			/* ctxid comparators[4-7] */
			mask = config->ctxid_mask1;
		else
			mask >>= 0x8;
	}

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_masks);
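
/*
 * "ctxid_masks" takes two hex words ("mask0 mask1"); each byte masks the
 * corresponding ctxid comparator and, as the loop above shows, every mask
 * bit that is set also clears the matching byte of the stored ctxid_pid
 * value.  Sketch (device name assumed):
 *
 *   echo '0x0 0x0' > /sys/bus/coresight/devices/etm0/ctxid_masks
 */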

static ssize_t vmid_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vmid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1817
	struct etmv4_config *config = &drvdata->config;
1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numvmidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->vmid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_idx);
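/*
 * Usage sketch, illustrative only (sysfs path omitted, as above):
 *
 *	# select VMID comparator 2 for subsequent vmid_val accesses
 *	echo 0x2 > vmid_idx
 *
 * Values greater than or equal to numvmidc are rejected with -EINVAL.
 */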

static ssize_t vmid_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = (unsigned long)config->vmid_val[config->vmid_idx];
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t vmid_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the VMID size is at least
	 * 8 bits.
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	config->vmid_val[config->vmid_idx] = (u64)val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_val);
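/*
 * Usage sketch, illustrative only:
 *
 *	# program the currently selected VMID comparator
 *	echo 0xbeef > vmid_val
 *
 * The value is parsed as hex and stored in vmid_val[vmid_idx]; writes are
 * only accepted when at least one VMID comparator is implemented and the
 * VMID size is non-zero.
 */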

static ssize_t vmid_masks_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	val1 = config->vmid_mask0;
	val2 = config->vmid_mask1;
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t vmid_masks_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 i, j, maskbyte;
	unsigned long val1, val2, mask;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * Only implemented when vmid tracing is enabled, i.e. at least one
	 * vmid comparator is implemented and the VMID size is at least
	 * 8 bits.
	 */
	if (!drvdata->vmid_size || !drvdata->numvmidc)
		return -EINVAL;
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);

	/*
	 * each byte[0..3] controls mask value applied to vmid
	 * comparator[0..3]
	 */
	switch (drvdata->numvmidc) {
	case 0x1:
		/* COMP0, bits[7:0] */
		config->vmid_mask0 = val1 & 0xFF;
		break;
	case 0x2:
		/* COMP1, bits[15:8] */
		config->vmid_mask0 = val1 & 0xFFFF;
		break;
	case 0x3:
		/* COMP2, bits[23:16] */
		config->vmid_mask0 = val1 & 0xFFFFFF;
		break;
	case 0x4:
		/* COMP3, bits[31:24] */
		config->vmid_mask0 = val1;
		break;
	case 0x5:
		/* COMP4, bits[7:0] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFF;
		break;
	case 0x6:
		/* COMP5, bits[15:8] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFF;
		break;
	case 0x7:
		/* COMP6, bits[23:16] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2 & 0xFFFFFF;
		break;
	case 0x8:
		/* COMP7, bits[31:24] */
		config->vmid_mask0 = val1;
		config->vmid_mask1 = val2;
		break;
	default:
		break;
	}

	/*
	 * If software sets a mask bit to 1, it must program the relevant byte
	 * of the vmid comparator value to 0x0, otherwise behavior is
	 * unpredictable.  For example, if bit[3] of vmid_mask0 is 1 (byte 0
	 * of the mask, which controls comparator 0), bits[31:24] of the vmid
	 * comparator 0 value register must be cleared.
	 */
	mask = config->vmid_mask0;
	for (i = 0; i < drvdata->numvmidc; i++) {
		/* mask value of corresponding vmid comparator */
		maskbyte = mask & ETMv4_EVENT_MASK;
		/*
		 * each bit corresponds to a byte of respective vmid comparator
		 * value register
		 */
		for (j = 0; j < 8; j++) {
			if (maskbyte & 1)
				config->vmid_val[i] &= ~(0xFFUL << (j * 8));
			maskbyte >>= 1;
		}
		/* Select the next vmid comparator mask value */
		if (i == 3)
			/* vmid comparators[4-7] */
			mask = config->vmid_mask1;
		else
			mask >>= 0x8;
	}
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(vmid_masks);
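/*
 * Usage mirrors ctxid_masks above; illustrative only, assuming more than
 * four VMID comparators are implemented:
 *
 *	# mask byte 0 of VMID comparator 0 and byte 0 of VMID comparator 4
 *	echo "0x1 0x1" > vmid_masks
 *
 * vmid_mask0 covers comparators 0-3 (one byte each) and vmid_mask1 covers
 * comparators 4-7; masked bytes of vmid_val[] are cleared accordingly.
 */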

static ssize_t cpu_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);

	val = drvdata->cpu;
	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static DEVICE_ATTR_RO(cpu);

static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};

struct etmv4_reg {
	void __iomem *addr;
	u32 data;
};

static void do_smp_cross_read(void *data)
{
	struct etmv4_reg *reg = data;

	reg->data = readl_relaxed(reg->addr);
}

static u32 etmv4_cross_read(const struct device *dev, u32 offset)
{
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
	struct etmv4_reg reg;

	reg.addr = drvdata->base + offset;
	/*
	 * smp cross call ensures the CPU will be powered up before
	 * accessing the ETMv4 trace core registers
	 */
	smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
	return reg.data;
}
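/*
 * Sketch of how etmv4_cross_read() could back a handcrafted show routine;
 * the attributes below use the coresight_simple_* wrappers instead, and
 * "trcdevarch" here is purely hypothetical:
 *
 *	static ssize_t trcdevarch_show(struct device *dev,
 *				       struct device_attribute *attr,
 *				       char *buf)
 *	{
 *		return scnprintf(buf, PAGE_SIZE, "0x%x\n",
 *				 etmv4_cross_read(dev->parent, TRCDEVARCH));
 *	}
 *
 * Running the read on the CPU that owns the trace unit matters because the
 * ETM registers may only be accessible while that CPU is powered up.
 */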

#define coresight_etm4x_reg(name, offset)			\
	coresight_simple_reg32(struct etmv4_drvdata, name, offset)

#define coresight_etm4x_cross_read(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, etmv4_cross_read,	\
			      name, offset)
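/*
 * Each wrapper invocation below is expected to expand, via the
 * coresight_simple_* helpers in coresight-priv.h, into a <name>_show()
 * routine plus a read-only dev_attr_<name> referenced in the attribute
 * tables further down.  coresight_etm4x_reg() reads the register directly
 * through drvdata->base, while coresight_etm4x_cross_read() goes through
 * etmv4_cross_read() so the access runs on the CPU owning the trace unit.
 */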

coresight_etm4x_reg(trcpdcr, TRCPDCR);
coresight_etm4x_reg(trcpdsr, TRCPDSR);
coresight_etm4x_reg(trclsr, TRCLSR);
coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_reg(trcdevid, TRCDEVID);
coresight_etm4x_reg(trcdevtype, TRCDEVTYPE);
coresight_etm4x_reg(trcpidr0, TRCPIDR0);
coresight_etm4x_reg(trcpidr1, TRCPIDR1);
coresight_etm4x_reg(trcpidr2, TRCPIDR2);
coresight_etm4x_reg(trcpidr3, TRCPIDR3);
coresight_etm4x_cross_read(trcoslsr, TRCOSLSR);
coresight_etm4x_cross_read(trcconfig, TRCCONFIGR);
coresight_etm4x_cross_read(trctraceid, TRCTRACEIDR);

static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};

coresight_etm4x_cross_read(trcidr0, TRCIDR0);
coresight_etm4x_cross_read(trcidr1, TRCIDR1);
coresight_etm4x_cross_read(trcidr2, TRCIDR2);
coresight_etm4x_cross_read(trcidr3, TRCIDR3);
coresight_etm4x_cross_read(trcidr4, TRCIDR4);
coresight_etm4x_cross_read(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_cross_read(trcidr8, TRCIDR8);
coresight_etm4x_cross_read(trcidr9, TRCIDR9);
coresight_etm4x_cross_read(trcidr10, TRCIDR10);
coresight_etm4x_cross_read(trcidr11, TRCIDR11);
coresight_etm4x_cross_read(trcidr12, TRCIDR12);
coresight_etm4x_cross_read(trcidr13, TRCIDR13);

static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};

static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
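/*
 * coresight_etmv4_groups is exported for the main ETMv4 driver, which is
 * expected to hand it to the coresight core at probe time so that the
 * top-level configuration attributes plus the "mgmt" and "trcidr"
 * subdirectories appear under the ETM's sysfs node.
 */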