coresight-tmc-etr.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/dma-mapping.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"

static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
	u32 axictl, sts;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

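	/* TMC_RSZ counts 32-bit words; drvdata->size is in bytes */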
	writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
	axictl &= ~TMC_AXICTL_CLEAR_MASK;
	axictl |= (TMC_AXICTL_PROT_CTL_B1 | TMC_AXICTL_WR_BURST_16);
	axictl |= TMC_AXICTL_AXCACHE_OS;

	if (tmc_etr_has_cap(drvdata, TMC_ETR_AXI_ARCACHE)) {
		axictl &= ~TMC_AXICTL_ARCACHE_MASK;
		axictl |= TMC_AXICTL_ARCACHE_OS;
	}

	writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
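	/* Point the data buffer address register at the DMA trace buffer */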
	tmc_write_dba(drvdata, drvdata->paddr);
	/*
	 * If the TMC pointers must be programmed before the session,
	 * we have to set them properly (i.e., RRP/RWP to the base address
	 * and STS to "not full").
	 */
	if (tmc_etr_has_cap(drvdata, TMC_ETR_SAVE_RESTORE)) {
		tmc_write_rrp(drvdata, drvdata->paddr);
		tmc_write_rwp(drvdata, drvdata->paddr);
		sts = readl_relaxed(drvdata->base + TMC_STS) & ~TMC_STS_FULL;
		writel_relaxed(sts, drvdata->base + TMC_STS);
	}

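	/*
	 * Enable formatting and trigger insertion, flush on FLUSHIN and
	 * on trigger events, and indicate a trigger when TRIGIN is asserted.
	 */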
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer at offset @pos, limited
 * to a maximum of @len bytes, updating @bufpp to point to where it is.
 */
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;
	char *bufp = drvdata->buf + pos;
	char *bufend = (char *)(drvdata->vaddr + drvdata->size);

	/* Adjust @len to the amount of data available from @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;

	if (actual <= 0)
		return actual;

	/*
	 * Since we use a circular buffer, the trace data starts at
	 * @drvdata->buf and may sit anywhere within @drvdata->vaddr; wrap
	 * the current position back into the buffer if it runs past the end.
	 */
	if (bufp >= bufend)
		bufp -= drvdata->size;
	/*
	 * For simplicity, avoid copying over a wrapped around buffer.
	 */
	if ((bufp + actual) > bufend)
		actual = bufend - bufp;
	*bufpp = bufp;
	return actual;
}

static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
{
	const u32 *barrier;
	u32 val;
	u32 *temp;
	u64 rwp;

	rwp = tmc_read_rwp(drvdata);
	val = readl_relaxed(drvdata->base + TMC_STS);

	/*
	 * Adjust the buffer to point to the beginning of the trace data
	 * and update the available trace data.
	 */
	if (val & TMC_STS_FULL) {
		drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
		drvdata->len = drvdata->size;

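		/*
		 * The write pointer wrapped: stamp barrier packets over the
		 * start of the (oldest) data so decoders can re-synchronise.
		 */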
		barrier = barrier_pkt;
		temp = (u32 *)drvdata->buf;

		while (*barrier) {
			*temp = *barrier;
			temp++;
			barrier++;
		}

	} else {
		drvdata->buf = drvdata->vaddr;
		drvdata->len = rwp - drvdata->paddr;
	}
}

static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode, the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	unsigned long flags;
	void __iomem *vaddr = NULL;
	dma_addr_t paddr = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->vaddr) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/*
		 * Contiguous memory can't be allocated while a spinlock is
		 * held.  As such, allocate memory here and free it if a buffer
		 * has already been allocated (from a previous session).
		 */
		vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
					   &paddr, GFP_KERNEL);
		if (!vaddr)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled, no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		goto out;

	/*
	 * If drvdata::vaddr == NULL, use the memory allocated above.
	 * Otherwise a buffer still exists from a previous session, so
	 * simply use that.
	 */
	if (drvdata->vaddr == NULL) {
		used = true;
		drvdata->vaddr = vaddr;
		drvdata->paddr = paddr;
		drvdata->buf = drvdata->vaddr;
	}

	drvdata->mode = CS_MODE_SYSFS;
	tmc_etr_enable_hw(drvdata);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used && vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	if (!ret)
		dev_info(drvdata->dev, "TMC-ETR enabled\n");

	return ret;
}

static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
{
	/* We don't support perf mode yet! */
	return -EINVAL;
}

static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
{
	switch (mode) {
	case CS_MODE_SYSFS:
		return tmc_enable_etr_sink_sysfs(csdev);
	case CS_MODE_PERF:
		return tmc_enable_etr_sink_perf(csdev);
	}

	/* We shouldn't be here */
	return -EINVAL;
}

static void tmc_disable_etr_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	/* Disable the TMC only if it is currently enabled */
	if (drvdata->mode != CS_MODE_DISABLED) {
		tmc_etr_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
	}

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETR disabled\n");
}

static const struct coresight_ops_sink tmc_etr_sink_ops = {
	.enable		= tmc_enable_etr_sink,
	.disable	= tmc_disable_etr_sink,
};

const struct coresight_ops tmc_etr_cs_ops = {
	.sink_ops	= &tmc_etr_sink_ops,
};

int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
{
	int ret = 0;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL, the trace data has already been read */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etr_disable_hw(drvdata);

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
	unsigned long flags;
	dma_addr_t paddr;
	void __iomem *vaddr = NULL;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. Since the tracer is still enabled, drvdata::buf
		 * can't be NULL.
		 */
		tmc_etr_enable_hw(drvdata);
	} else {
		/*
		 * The ETR is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		vaddr = drvdata->vaddr;
		paddr = drvdata->paddr;
		drvdata->buf = drvdata->vaddr = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free allocated memory outside of the spinlock */
	if (vaddr)
		dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);

	return 0;
}