remoteproc_core.c
/*
 * Remote Processor Framework
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 * Mark Grosen <mgrosen@ti.com>
 * Fernando Guzman Lugo <fernando.lugo@ti.com>
 * Suman Anna <s-anna@ti.com>
 * Robert Tivy <rtivy@ti.com>
 * Armando Uribe De Leon <x0095078@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
#include <asm/byteorder.h>

#include "remoteproc_internal.h"

static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);

typedef int (*rproc_handle_resources_t)(struct rproc *rproc,
				struct resource_table *table, int len);
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
				 void *rsc, int offset, int avail);

/* Unique indices for remoteproc devices */
static DEFINE_IDA(rproc_dev_index);

static const char * const rproc_crash_names[] = {
	[RPROC_MMUFAULT]	= "mmufault",
	[RPROC_WATCHDOG]	= "watchdog",
	[RPROC_FATAL_ERROR]	= "fatal error",
};

/* translate rproc_crash_type to string */
static const char *rproc_crash_to_string(enum rproc_crash_type type)
{
	if (type < ARRAY_SIZE(rproc_crash_names))
		return rproc_crash_names[type];

	return "unknown";
}

/*
 * This is the IOMMU fault handler we register with the IOMMU API
 * (when relevant; not all remote processors access memory through
 * an IOMMU).
 *
 * IOMMU core will invoke this handler whenever the remote processor
 * tries to access an unmapped device address.
 */
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *token)
{
	struct rproc *rproc = token;

	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);

	rproc_report_crash(rproc, RPROC_MMUFAULT);

	/*
	 * Let the iommu core know we're not really handling this fault;
	 * we just used it as a recovery trigger.
	 */
	return -ENOSYS;
}

static int rproc_enable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain;
	struct device *dev = rproc->dev.parent;
	int ret;

	if (!rproc->has_iommu) {
		dev_dbg(dev, "iommu not present\n");
		return 0;
	}

	domain = iommu_domain_alloc(dev->bus);
	if (!domain) {
		dev_err(dev, "can't alloc iommu domain\n");
		return -ENOMEM;
	}

	iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		dev_err(dev, "can't attach iommu device: %d\n", ret);
		goto free_domain;
	}

	rproc->domain = domain;

	return 0;

free_domain:
	iommu_domain_free(domain);
	return ret;
}

static void rproc_disable_iommu(struct rproc *rproc)
{
	struct iommu_domain *domain = rproc->domain;
	struct device *dev = rproc->dev.parent;

	if (!domain)
		return;

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
}

/**
 * rproc_da_to_va() - lookup the kernel virtual address for a remoteproc address
 * @rproc: handle of a remote processor
 * @da: remoteproc device address to translate
 * @len: length of the memory region @da is pointing to
 *
 * Some remote processors will ask us to allocate them physically contiguous
 * memory regions (which we call "carveouts"), and map them to specific
 * device addresses (which are hardcoded in the firmware). They may also have
 * dedicated memory regions internal to the processors, and use them either
 * exclusively or alongside carveouts.
 *
 * They may then ask us to copy objects into specific device addresses (e.g.
 * code/data sections) or expose certain symbols at other device addresses
 * (e.g. their trace buffer).
 *
 * This function is a helper with which we can go over the allocated carveouts
 * and translate specific device addresses to kernel virtual addresses so we
 * can access the referenced memory. It also allows translations to be
 * performed on the internal remoteproc memory regions through a platform
 * implementation specific da_to_va op, if present.
 *
 * The function returns a valid kernel address on success or NULL on failure.
 *
 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
 * but only on kernel direct mapped RAM memory. Instead, we're just using
 * here the output of the DMA API for the carveouts, which should be more
 * correct.
 */
void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
{
	struct rproc_mem_entry *carveout;
	void *ptr = NULL;

	if (rproc->ops->da_to_va) {
		ptr = rproc->ops->da_to_va(rproc, da, len);
		if (ptr)
			goto out;
	}

	list_for_each_entry(carveout, &rproc->carveouts, node) {
		int offset = da - carveout->da;

		/* try next carveout if da is too small */
		if (offset < 0)
			continue;

		/* try next carveout if da is too large */
		if (offset + len > carveout->len)
			continue;

		ptr = carveout->va + offset;

		break;
	}

out:
	return ptr;
}
EXPORT_SYMBOL(rproc_da_to_va);
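
/*
 * Illustrative use of rproc_da_to_va() (a sketch, not part of this file;
 * it mirrors how the ELF loader copies a segment into device memory):
 *
 *	void *ptr = rproc_da_to_va(rproc, phdr->p_paddr, phdr->p_filesz);
 *	if (!ptr)
 *		return -EINVAL;
 *	memcpy(ptr, elf_data + phdr->p_offset, phdr->p_filesz);
 */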

int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct rproc_vring *rvring = &rvdev->vring[i];
	struct fw_rsc_vdev *rsc;
	dma_addr_t dma;
	void *va;
	int ret, size, notifyid;

	/* actual size of vring (in bytes) */
	size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));

	/*
	 * Allocate non-cacheable memory for the vring. In the future
	 * this call will also configure the IOMMU for us
	 */
	va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent, "dma_alloc_coherent failed\n");
		return -EINVAL;
	}

	/*
	 * Assign an rproc-wide unique index for this vring
	 * TODO: assign a notifyid for rvdev updates as well
	 * TODO: support predefined notifyids (via resource table)
	 */
	ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		dev_err(dev, "idr_alloc failed: %d\n", ret);
		dma_free_coherent(dev->parent, size, va, dma);
		return ret;
	}
	notifyid = ret;

	dev_dbg(dev, "vring%d: va %p dma %pad size %x idr %d\n",
		i, va, &dma, size, notifyid);

	rvring->va = va;
	rvring->dma = dma;
	rvring->notifyid = notifyid;

	/*
	 * Let the rproc know the notifyid and da of this vring.
	 * Not all platforms use dma_alloc_coherent to automatically
	 * set up the iommu. In this case the device address (da) will
	 * hold the physical address and not the device address.
	 */
	rsc = (void *)rproc->table_ptr + rvdev->rsc_offset;
	rsc->vring[i].da = dma;
	rsc->vring[i].notifyid = notifyid;
	return 0;
}

static int
rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
{
	struct rproc *rproc = rvdev->rproc;
	struct device *dev = &rproc->dev;
	struct fw_rsc_vdev_vring *vring = &rsc->vring[i];
	struct rproc_vring *rvring = &rvdev->vring[i];

	dev_dbg(dev, "vdev rsc: vring%d: da %x, qsz %d, align %d\n",
				i, vring->da, vring->num, vring->align);

	/* make sure reserved bytes are zeroes */
	if (vring->reserved) {
		dev_err(dev, "vring rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	/* verify queue size and vring alignment are sane */
	if (!vring->num || !vring->align) {
		dev_err(dev, "invalid qsz (%d) or alignment (%d)\n",
						vring->num, vring->align);
		return -EINVAL;
	}

	rvring->len = vring->num;
	rvring->align = vring->align;
	rvring->rvdev = rvdev;

	return 0;
}

void rproc_free_vring(struct rproc_vring *rvring)
{
	int size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
	struct rproc *rproc = rvring->rvdev->rproc;
	int idx = rvring - rvring->rvdev->vring;
	struct fw_rsc_vdev *rsc;

	dma_free_coherent(rproc->dev.parent, size, rvring->va, rvring->dma);
	idr_remove(&rproc->notifyids, rvring->notifyid);

	/* reset resource entry info */
	rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
	rsc->vring[idx].da = 0;
	rsc->vring[idx].notifyid = -1;
}

/**
 * rproc_handle_vdev() - handle a vdev fw resource
 * @rproc: the remote processor
 * @rsc: the vdev resource descriptor
 * @offset: offset of @rsc within the resource table
 * @avail: size of available data (for sanity checking the image)
 *
 * This resource entry requests the host to statically register a virtio
 * device (vdev), and setup everything needed to support it. It contains
 * everything needed to make it possible: the virtio device id, virtio
 * device features, vrings information, virtio config space, etc...
 *
 * Before registering the vdev, the vrings are allocated from non-cacheable
 * physically contiguous memory. Currently we only support two vrings per
 * remote processor (temporary limitation). We might also want to consider
 * doing the vring allocation only later when ->find_vqs() is invoked, and
 * then release them upon ->del_vqs().
 *
 * Note: @da is currently not really handled correctly: we dynamically
 * allocate it using the DMA API, ignoring requested hard coded addresses,
 * and we don't take care of any required IOMMU programming. This is all
 * going to be taken care of when the generic iommu-based DMA API will be
 * merged. Meanwhile, statically-addressed iommu-based firmware images should
 * use RSC_DEVMEM resource entries to map their required @da to the physical
 * address of their base CMA region (ouch, hacky!).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
							int offset, int avail)
{
	struct device *dev = &rproc->dev;
	struct rproc_vdev *rvdev;
	int i, ret;

	/* make sure resource isn't truncated */
	if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
			+ rsc->config_len > avail) {
		dev_err(dev, "vdev rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved[0] || rsc->reserved[1]) {
		dev_err(dev, "vdev rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n",
		rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings);

	/* we currently support only two vrings per rvdev */
	if (rsc->num_of_vrings > ARRAY_SIZE(rvdev->vring)) {
		dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings);
		return -EINVAL;
	}

	rvdev = kzalloc(sizeof(*rvdev), GFP_KERNEL);
	if (!rvdev)
		return -ENOMEM;

	rvdev->rproc = rproc;

	/* parse the vrings */
	for (i = 0; i < rsc->num_of_vrings; i++) {
		ret = rproc_parse_vring(rvdev, rsc, i);
		if (ret)
			goto free_rvdev;
	}

	/* remember the resource offset */
	rvdev->rsc_offset = offset;

	list_add_tail(&rvdev->node, &rproc->rvdevs);

	/* it is now safe to add the virtio device */
	ret = rproc_add_virtio_dev(rvdev, rsc->id);
	if (ret)
		goto remove_rvdev;

	return 0;

remove_rvdev:
	list_del(&rvdev->node);
free_rvdev:
	kfree(rvdev);
	return ret;
}

/**
 * rproc_handle_trace() - handle a shared trace buffer resource
 * @rproc: the remote processor
 * @rsc: the trace resource descriptor
 * @offset: offset of @rsc within the resource table
 * @avail: size of available data (for sanity checking the image)
 *
 * In case the remote processor dumps trace logs into memory,
 * export it via debugfs.
 *
 * Currently, the 'da' member of @rsc should contain the device address
 * where the remote processor is dumping the traces. Later we could also
 * support dynamically allocating this address using the generic
 * DMA API (but currently there isn't a use case for that).
 *
 * Returns 0 on success, or an appropriate error code otherwise
 */
static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
							int offset, int avail)
{
	struct rproc_mem_entry *trace;
	struct device *dev = &rproc->dev;
	void *ptr;
	char name[15];

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "trace rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "trace rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	/* what's the kernel address of this resource ? */
	ptr = rproc_da_to_va(rproc, rsc->da, rsc->len);
	if (!ptr) {
		dev_err(dev, "erroneous trace resource entry\n");
		return -EINVAL;
	}

	trace = kzalloc(sizeof(*trace), GFP_KERNEL);
	if (!trace)
		return -ENOMEM;

	/* set the trace buffer dma properties */
	trace->len = rsc->len;
	trace->va = ptr;

	/* make sure snprintf always null terminates, even if truncating */
	snprintf(name, sizeof(name), "trace%d", rproc->num_traces);

	/* create the debugfs entry */
	trace->priv = rproc_create_trace_file(name, rproc, trace);
	if (!trace->priv) {
		trace->va = NULL;
		kfree(trace);
		return -EINVAL;
	}

	list_add_tail(&trace->node, &rproc->traces);

	rproc->num_traces++;

	dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n",
		name, ptr, rsc->da, rsc->len);

	return 0;
}

/**
 * rproc_handle_devmem() - handle devmem resource entry
 * @rproc: remote processor handle
 * @rsc: the devmem resource entry
 * @offset: offset of @rsc within the resource table
 * @avail: size of available data (for sanity checking the image)
 *
 * Remote processors commonly need to access certain on-chip peripherals.
 *
 * Some of these remote processors access memory via an iommu device,
 * and might require us to configure their iommu before they can access
 * the on-chip peripherals they need.
 *
 * This resource entry is a request to map such a peripheral device.
 *
 * These devmem entries will contain the physical address of the device in
 * the 'pa' member. If a specific device address is expected, then 'da' will
 * contain it (currently this is the only use case supported). 'len' will
 * contain the size of the physical region we need to map.
 *
 * Currently we just "trust" those devmem entries to contain valid physical
 * addresses, but this is going to change: we want the implementations to
 * tell us ranges of physical addresses the firmware is allowed to request,
 * and not allow firmwares to request access to physical addresses that
 * are outside those ranges.
 */
static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
							int offset, int avail)
{
	struct rproc_mem_entry *mapping;
	struct device *dev = &rproc->dev;
	int ret;

	/* no point in handling this resource without a valid iommu domain */
	if (!rproc->domain)
		return -EINVAL;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "devmem rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "devmem rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
	if (ret) {
		dev_err(dev, "failed to map devmem: %d\n", ret);
		goto out;
	}

	/*
	 * We'll need this info later when we'll want to unmap everything
	 * (e.g. on shutdown).
	 *
	 * We can't trust the remote processor not to change the resource
	 * table, so we must maintain this info independently.
	 */
	mapping->da = rsc->da;
	mapping->len = rsc->len;
	list_add_tail(&mapping->node, &rproc->mappings);

	dev_dbg(dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n",
					rsc->pa, rsc->da, rsc->len);

	return 0;

out:
	kfree(mapping);
	return ret;
}

/**
 * rproc_handle_carveout() - handle phys contig memory allocation requests
 * @rproc: rproc handle
 * @rsc: the resource entry
 * @offset: offset of @rsc within the resource table
 * @avail: size of available data (for image validation)
 *
 * This function will handle firmware requests for allocation of physically
 * contiguous memory regions.
 *
 * These request entries should come first in the firmware's resource table,
 * as other firmware entries might request placing other data objects inside
 * these memory regions (e.g. data/code segments, trace resource entries, ...).
 *
 * Allocating memory this way helps utilizing the reserved physical memory
 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
 * pressure is important; it may have a substantial impact on performance.
 */
static int rproc_handle_carveout(struct rproc *rproc,
						struct fw_rsc_carveout *rsc,
						int offset, int avail)
{
	struct rproc_mem_entry *carveout, *mapping;
	struct device *dev = &rproc->dev;
	dma_addr_t dma;
	void *va;
	int ret;

	if (sizeof(*rsc) > avail) {
		dev_err(dev, "carveout rsc is truncated\n");
		return -EINVAL;
	}

	/* make sure reserved bytes are zeroes */
	if (rsc->reserved) {
		dev_err(dev, "carveout rsc has non zero reserved bytes\n");
		return -EINVAL;
	}

	dev_dbg(dev, "carveout rsc: name: %s, da %x, pa %x, len 0x%x, flags %x\n",
		rsc->name, rsc->da, rsc->pa, rsc->len, rsc->flags);

	carveout = kzalloc(sizeof(*carveout), GFP_KERNEL);
	if (!carveout)
		return -ENOMEM;

	va = dma_alloc_coherent(dev->parent, rsc->len, &dma, GFP_KERNEL);
	if (!va) {
		dev_err(dev->parent,
			"failed to allocate dma memory: len 0x%x\n", rsc->len);
		ret = -ENOMEM;
		goto free_carv;
	}

	dev_dbg(dev, "carveout va %p, dma %pad, len 0x%x\n",
		va, &dma, rsc->len);

	/*
	 * Ok, this is non-standard.
	 *
	 * Sometimes we can't rely on the generic iommu-based DMA API
	 * to dynamically allocate the device address and then set the IOMMU
	 * tables accordingly, because some remote processors might
	 * _require_ us to use hard coded device addresses that their
	 * firmware was compiled with.
	 *
	 * In this case, we must use the IOMMU API directly and map
	 * the memory to the device address as expected by the remote
	 * processor.
	 *
	 * Obviously such remote processor devices should not be configured
	 * to use the iommu-based DMA API: we expect 'dma' to contain the
	 * physical address in this case.
	 */
	if (rproc->domain) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto dma_free;
		}

		ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len,
								rsc->flags);
		if (ret) {
			dev_err(dev, "iommu_map failed: %d\n", ret);
			goto free_mapping;
		}

		/*
		 * We'll need this info later when we'll want to unmap
		 * everything (e.g. on shutdown).
		 *
		 * We can't trust the remote processor not to change the
		 * resource table, so we must maintain this info independently.
		 */
		mapping->da = rsc->da;
		mapping->len = rsc->len;
		list_add_tail(&mapping->node, &rproc->mappings);

		dev_dbg(dev, "carveout mapped 0x%x to %pad\n",
			rsc->da, &dma);
	}

	/*
	 * Some remote processors might need to know the pa
	 * even though they are behind an IOMMU. E.g., OMAP4's
	 * remote M3 processor needs this so it can control
	 * on-chip hardware accelerators that are not behind
	 * the IOMMU, and therefore must know the pa.
	 *
	 * Generally we don't want to expose physical addresses
	 * if we don't have to (remote processors are generally
	 * _not_ trusted), so we might want to do this only for
	 * remote processors that _must_ have this (e.g. OMAP4's
	 * dual M3 subsystem).
	 *
	 * Non-IOMMU processors might also want to have this info.
	 * In this case, the device address and the physical address
	 * are the same.
	 */
	rsc->pa = dma;

	carveout->va = va;
	carveout->len = rsc->len;
	carveout->dma = dma;
	carveout->da = rsc->da;

	list_add_tail(&carveout->node, &rproc->carveouts);

	return 0;

free_mapping:
	kfree(mapping);
dma_free:
	dma_free_coherent(dev->parent, rsc->len, va, dma);
free_carv:
	kfree(carveout);
	return ret;
}

static int rproc_count_vrings(struct rproc *rproc, struct fw_rsc_vdev *rsc,
			      int offset, int avail)
{
	/* Summarize the number of notification IDs */
	rproc->max_notifyid += rsc->num_of_vrings;

	return 0;
}

/*
 * A lookup table for resource handlers. The indices are defined in
 * enum fw_resource_type.
 */
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
	[RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
	[RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
	[RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
	[RSC_VDEV] = NULL, /* VDEVs were handled upon registration */
};

static rproc_handle_resource_t rproc_vdev_handler[RSC_LAST] = {
	[RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
};

static rproc_handle_resource_t rproc_count_vrings_handler[RSC_LAST] = {
	[RSC_VDEV] = (rproc_handle_resource_t)rproc_count_vrings,
};

/* handle firmware resource entries before booting the remote processor */
static int rproc_handle_resources(struct rproc *rproc, int len,
				  rproc_handle_resource_t handlers[RSC_LAST])
{
	struct device *dev = &rproc->dev;
	rproc_handle_resource_t handler;
	int ret = 0, i;

	for (i = 0; i < rproc->table_ptr->num; i++) {
		int offset = rproc->table_ptr->offset[i];
		struct fw_rsc_hdr *hdr = (void *)rproc->table_ptr + offset;
		int avail = len - offset - sizeof(*hdr);
		void *rsc = (void *)hdr + sizeof(*hdr);

		/* make sure table isn't truncated */
		if (avail < 0) {
			dev_err(dev, "rsc table is truncated\n");
			return -EINVAL;
		}

		dev_dbg(dev, "rsc: type %d\n", hdr->type);

		if (hdr->type >= RSC_LAST) {
			dev_warn(dev, "unsupported resource %d\n", hdr->type);
			continue;
		}

		handler = handlers[hdr->type];
		if (!handler)
			continue;

		ret = handler(rproc, rsc, offset + sizeof(*hdr), avail);
		if (ret)
			break;
	}

	return ret;
}
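
/*
 * For reference, a sketch of the resource table layout this loop walks
 * (see the definitions in <linux/remoteproc.h>; field details may vary
 * by kernel version):
 *
 *	struct resource_table {
 *		u32 ver;
 *		u32 num;	(number of offset[] entries)
 *		u32 reserved[2];
 *		u32 offset[0];	(offsets of the resource entries)
 *	};
 *
 * Each offset[i] points at a struct fw_rsc_hdr { u32 type; } immediately
 * followed by the type-specific payload dispatched via handlers[] above.
 */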

/**
 * rproc_resource_cleanup() - clean up and free all acquired resources
 * @rproc: rproc handle
 *
 * This function will free all resources acquired for @rproc, and it
 * is called whenever @rproc either shuts down or fails to boot.
 */
static void rproc_resource_cleanup(struct rproc *rproc)
{
	struct rproc_mem_entry *entry, *tmp;
	struct device *dev = &rproc->dev;

	/* clean up debugfs trace entries */
	list_for_each_entry_safe(entry, tmp, &rproc->traces, node) {
		rproc_remove_trace_file(entry->priv);
		rproc->num_traces--;
		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up iommu mapping entries */
	list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) {
		size_t unmapped;

		unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
		if (unmapped != entry->len) {
			/* nothing much to do besides complaining */
			dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
								unmapped);
		}

		list_del(&entry->node);
		kfree(entry);
	}

	/* clean up carveout allocations */
	list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) {
		dma_free_coherent(dev->parent, entry->len, entry->va,
				  entry->dma);
		list_del(&entry->node);
		kfree(entry);
	}
}

/*
 * take a firmware and boot a remote processor with it.
 */
static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
{
	struct device *dev = &rproc->dev;
	const char *name = rproc->firmware;
	struct resource_table *table, *loaded_table;
	int ret, tablesz;

	if (!rproc->table_ptr)
		return -ENOMEM;

	ret = rproc_fw_sanity_check(rproc, fw);
	if (ret)
		return ret;

	dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);

	/*
	 * if enabling an IOMMU isn't relevant for this rproc, this is
	 * just a nop
	 */
	ret = rproc_enable_iommu(rproc);
	if (ret) {
		dev_err(dev, "can't enable iommu: %d\n", ret);
		return ret;
	}

	rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
	ret = -EINVAL;

	/* look for the resource table */
	table = rproc_find_rsc_table(rproc, fw, &tablesz);
	if (!table) {
		dev_err(dev, "Failed to find resource table\n");
		goto clean_up;
	}

	/* Verify that resource table in loaded fw is unchanged */
	if (rproc->table_csum != crc32(0, table, tablesz)) {
		dev_err(dev, "resource checksum failed, fw changed?\n");
		goto clean_up;
	}

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, tablesz, rproc_loading_handlers);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up;
	}

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		goto clean_up;
	}

	/*
	 * The starting device has been given the rproc->cached_table as the
	 * resource table. The address of the vring along with the other
	 * allocated resources (carveouts etc) is stored in cached_table.
	 * In order to pass this information to the remote device we must
	 * copy this information to device memory.
	 */
	loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
	if (loaded_table)
		memcpy(loaded_table, rproc->cached_table, tablesz);

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto clean_up;
	}

	/*
	 * Update table_ptr so that all subsequent vring allocations and
	 * virtio fields manipulation update the actual loaded resource table
	 * in device memory.
	 */
	rproc->table_ptr = loaded_table;

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

clean_up:
	rproc_resource_cleanup(rproc);
	rproc_disable_iommu(rproc);
	return ret;
}

/*
 * take a firmware and look for virtio devices to register.
 *
 * Note: this function is called asynchronously upon registration of the
 * remote processor (so we must wait until it completes before we try
 * to unregister the device; one other option is just to use kref here,
 * which might be cleaner).
 */
static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
	struct rproc *rproc = context;
	struct resource_table *table;
	int ret, tablesz;

	if (rproc_fw_sanity_check(rproc, fw) < 0)
		goto out;

	/* look for the resource table */
	table = rproc_find_rsc_table(rproc, fw, &tablesz);
	if (!table)
		goto out;

	rproc->table_csum = crc32(0, table, tablesz);

	/*
	 * Create a copy of the resource table. When a virtio device starts
	 * and calls vring_new_virtqueue() the address of the allocated vring
	 * will be stored in the cached_table. Before the device is started,
	 * cached_table will be copied into device memory.
	 */
	rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
	if (!rproc->cached_table)
		goto out;

	rproc->table_ptr = rproc->cached_table;

	/* count the number of notify-ids */
	rproc->max_notifyid = -1;
	ret = rproc_handle_resources(rproc, tablesz,
				     rproc_count_vrings_handler);
	if (ret)
		goto out;

	/* look for virtio devices and register them */
	ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);

out:
	release_firmware(fw);
	/* allow rproc_del() contexts, if any, to proceed */
	complete_all(&rproc->firmware_loading_complete);
}

static int rproc_add_virtio_devices(struct rproc *rproc)
{
	int ret;

	/* rproc_del() calls must wait until async loader completes */
	init_completion(&rproc->firmware_loading_complete);

	/*
	 * We must retrieve early virtio configuration info from
	 * the firmware (e.g. whether to register a virtio device,
	 * what virtio features it supports, ...).
	 *
	 * We're initiating an asynchronous firmware loading, so we can
	 * be built-in kernel code, without hanging the boot process.
	 */
	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      rproc->firmware, &rproc->dev, GFP_KERNEL,
				      rproc, rproc_fw_config_virtio);
	if (ret < 0) {
		dev_err(&rproc->dev, "request_firmware_nowait err: %d\n", ret);
		complete_all(&rproc->firmware_loading_complete);
	}

	return ret;
}

/**
 * rproc_trigger_recovery() - recover a remoteproc
 * @rproc: the remote processor
 *
 * The recovery is done by resetting all the virtio devices, that way all the
 * rpmsg drivers will be reset along with the remote processor making the
 * remoteproc functional again.
 *
 * This function can sleep, so it cannot be called from atomic context.
 */
int rproc_trigger_recovery(struct rproc *rproc)
{
	struct rproc_vdev *rvdev, *rvtmp;

	dev_err(&rproc->dev, "recovering %s\n", rproc->name);

	init_completion(&rproc->crash_comp);

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, rvtmp, &rproc->rvdevs, node)
		rproc_remove_virtio_dev(rvdev);

	/* wait until there are no more rproc users */
	wait_for_completion(&rproc->crash_comp);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);

	return rproc_add_virtio_devices(rproc);
}

/**
 * rproc_crash_handler_work() - handle a crash
 * @work: work instance embedded in the crashing rproc
 *
 * This function needs to handle everything related to a crash, like cpu
 * registers and stack dump, information to help to debug the fatal error, etc.
 */
static void rproc_crash_handler_work(struct work_struct *work)
{
	struct rproc *rproc = container_of(work, struct rproc, crash_handler);
	struct device *dev = &rproc->dev;

	dev_dbg(dev, "enter %s\n", __func__);

	mutex_lock(&rproc->lock);

	if (rproc->state == RPROC_CRASHED || rproc->state == RPROC_OFFLINE) {
		/* handle only the first crash detected */
		mutex_unlock(&rproc->lock);
		return;
	}

	rproc->state = RPROC_CRASHED;
	dev_err(dev, "handling crash #%u in %s\n", ++rproc->crash_cnt,
		rproc->name);

	mutex_unlock(&rproc->lock);

	if (!rproc->recovery_disabled)
		rproc_trigger_recovery(rproc);
}

/**
 * __rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 * @wait: wait for rproc registration completion
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
static int __rproc_boot(struct rproc *rproc, bool wait)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = &rproc->dev;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->parent->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* skip the boot process if rproc is already powered up */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret = request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	/* if rproc virtio is not yet configured, wait */
	if (wait)
		wait_for_completion(&rproc->firmware_loading_complete);

	ret = rproc_fw_boot(rproc, firmware_p);

	release_firmware(firmware_p);

downref_rproc:
	if (ret) {
		module_put(dev->parent->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}

/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 */
int rproc_boot(struct rproc *rproc)
{
	return __rproc_boot(rproc, true);
}
EXPORT_SYMBOL(rproc_boot);
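
/*
 * Illustrative boot/shutdown pairing (a sketch, not part of this file;
 * "rproc" is assumed to have come from e.g. rproc_get_by_phandle()):
 *
 *	ret = rproc_boot(rproc);
 *	if (ret)
 *		return ret;
 *
 *	...interact with the remote processor...
 *
 *	rproc_shutdown(rproc);
 */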

/**
 * rproc_boot_nowait() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Same as rproc_boot() but don't wait for rproc registration completion
 */
int rproc_boot_nowait(struct rproc *rproc)
{
	return __rproc_boot(rproc, false);
}

/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by an additional user(s), then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount,
 *   which means that the @rproc handle stays valid even after
 *   rproc_shutdown() returns, and users can still use it with a subsequent
 *   rproc_boot(), if needed.
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		atomic_inc(&rproc->power);
		dev_err(dev, "can't stop rproc: %d\n", ret);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	/* Give the next start a clean resource table */
	rproc->table_ptr = rproc->cached_table;

	/* if in crash state, unlock crash handler */
	if (rproc->state == RPROC_CRASHED)
		complete_all(&rproc->crash_comp);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	if (!ret)
		module_put(dev->parent->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);

/**
 * rproc_get_by_phandle() - find a remote processor by phandle
 * @phandle: phandle to the rproc
 *
 * Finds an rproc handle using the remote processor's phandle, and then
 * returns a handle to the rproc.
 *
 * This function increments the remote processor's refcount, so always
 * use rproc_put() to decrement it back once rproc isn't needed anymore.
 *
 * Returns the rproc handle on success, and NULL on failure.
 */
#ifdef CONFIG_OF
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	struct rproc *rproc = NULL, *r;
	struct device_node *np;

	np = of_find_node_by_phandle(phandle);
	if (!np)
		return NULL;

	mutex_lock(&rproc_list_mutex);
	list_for_each_entry(r, &rproc_list, node) {
		if (r->dev.parent && r->dev.parent->of_node == np) {
			rproc = r;
			get_device(&rproc->dev);
			break;
		}
	}
	mutex_unlock(&rproc_list_mutex);

	of_node_put(np);

	return rproc;
}
#else
struct rproc *rproc_get_by_phandle(phandle phandle)
{
	return NULL;
}
#endif
EXPORT_SYMBOL(rproc_get_by_phandle);
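
/*
 * Illustrative lookup from a client driver (a sketch, not part of this
 * file; the "rproc" device-tree property name is hypothetical):
 *
 *	u32 phandle;
 *	struct rproc *rproc;
 *
 *	if (of_property_read_u32(np, "rproc", &phandle))
 *		return -EINVAL;
 *
 *	rproc = rproc_get_by_phandle(phandle);
 *	if (!rproc)
 *		return -EPROBE_DEFER;
 *	...
 *	rproc_put(rproc);
 */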

/**
 * rproc_add() - register a remote processor
 * @rproc: the remote processor handle to register
 *
 * Registers @rproc with the remoteproc framework, after it has been
 * allocated with rproc_alloc().
 *
 * This is called by the platform-specific rproc implementation, whenever
 * a new remote processor device is probed.
 *
 * Returns 0 on success and an appropriate error code otherwise.
 *
 * Note: this function initiates an asynchronous firmware loading
 * context, which will look for virtio devices supported by the rproc's
 * firmware.
 *
 * If found, those virtio devices will be created and added, so as a result
 * of registering this remote processor, additional virtio drivers might be
 * probed.
 */
int rproc_add(struct rproc *rproc)
{
	struct device *dev = &rproc->dev;
	int ret;

	ret = device_add(dev);
	if (ret < 0)
		return ret;

	dev_info(dev, "%s is available\n", rproc->name);

	dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n");
	dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n");

	/* create debugfs entries */
	rproc_create_debug_dir(rproc);
	ret = rproc_add_virtio_devices(rproc);
	if (ret < 0)
		return ret;

	/* expose to rproc_get_by_phandle users */
	mutex_lock(&rproc_list_mutex);
	list_add(&rproc->node, &rproc_list);
	mutex_unlock(&rproc_list_mutex);

	return 0;
}
EXPORT_SYMBOL(rproc_add);

/**
 * rproc_type_release() - release a remote processor instance
 * @dev: the rproc's device
 *
 * This function should _never_ be called directly.
 *
 * It will be called by the driver core when no one holds a valid pointer
 * to @dev anymore.
 */
static void rproc_type_release(struct device *dev)
{
	struct rproc *rproc = container_of(dev, struct rproc, dev);

	dev_info(&rproc->dev, "releasing %s\n", rproc->name);

	rproc_delete_debug_dir(rproc);

	idr_destroy(&rproc->notifyids);

	if (rproc->index >= 0)
		ida_simple_remove(&rproc_dev_index, rproc->index);

	kfree(rproc);
}

static struct device_type rproc_type = {
	.name		= "remoteproc",
	.release	= rproc_type_release,
};

/**
 * rproc_alloc() - allocate a remote processor handle
 * @dev: the underlying device
 * @name: name of this remote processor
 * @ops: platform-specific handlers (mainly start/stop)
 * @firmware: name of firmware file to load, can be NULL
 * @len: length of private data needed by the rproc driver (in bytes)
 *
 * Allocates a new remote processor handle, but does not register
 * it yet. If @firmware is NULL, a default name is used.
 *
 * This function should be used by rproc implementations during initialization
 * of the remote processor.
 *
 * After creating an rproc handle using this function, and when ready,
 * implementations should then call rproc_add() to complete
 * the registration of the remote processor.
 *
 * On success the new rproc is returned, and on failure, NULL.
 *
 * Note: _never_ directly deallocate @rproc, even if it was not registered
 * yet. Instead, when you need to unroll rproc_alloc(), use rproc_put().
 */
struct rproc *rproc_alloc(struct device *dev, const char *name,
				const struct rproc_ops *ops,
				const char *firmware, int len)
{
	struct rproc *rproc;
	char *p, *template = "rproc-%s-fw";
	int name_len = 0;

	if (!dev || !name || !ops)
		return NULL;

	if (!firmware)
		/*
		 * Make room for default firmware name (minus %s plus '\0').
		 * If the caller didn't pass in a firmware name then
		 * construct a default name.  We're already glomming 'len'
		 * bytes onto the end of the struct rproc allocation, so do
		 * a few more for the default firmware name (but only if
		 * the caller doesn't pass one).
		 */
		name_len = strlen(name) + strlen(template) - 2 + 1;

	rproc = kzalloc(sizeof(*rproc) + len + name_len, GFP_KERNEL);
	if (!rproc)
		return NULL;

	if (!firmware) {
		p = (char *)rproc + sizeof(struct rproc) + len;
		snprintf(p, name_len, template, name);
	} else {
		p = (char *)firmware;
	}

	rproc->firmware = p;
	rproc->name = name;
	rproc->ops = ops;
	rproc->priv = &rproc[1];

	device_initialize(&rproc->dev);
	rproc->dev.parent = dev;
	rproc->dev.type = &rproc_type;

	/* Assign a unique device index and name */
	rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
	if (rproc->index < 0) {
		dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
		put_device(&rproc->dev);
		return NULL;
	}

	dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);

	atomic_set(&rproc->power, 0);

	/* Set ELF as the default fw_ops handler */
	rproc->fw_ops = &rproc_elf_fw_ops;

	mutex_init(&rproc->lock);

	idr_init(&rproc->notifyids);

	INIT_LIST_HEAD(&rproc->carveouts);
	INIT_LIST_HEAD(&rproc->mappings);
	INIT_LIST_HEAD(&rproc->traces);
	INIT_LIST_HEAD(&rproc->rvdevs);

	INIT_WORK(&rproc->crash_handler, rproc_crash_handler_work);
	init_completion(&rproc->crash_comp);

	rproc->state = RPROC_OFFLINE;

	return rproc;
}
EXPORT_SYMBOL(rproc_alloc);
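
/*
 * Illustrative allocation/registration from a platform driver's probe
 * (a sketch, not part of this file; "my_rproc_ops", "my-rproc" and the
 * firmware name are hypothetical):
 *
 *	rproc = rproc_alloc(&pdev->dev, "my-rproc", &my_rproc_ops,
 *			    "my-rproc-fw.elf", sizeof(struct my_priv));
 *	if (!rproc)
 *		return -ENOMEM;
 *
 *	ret = rproc_add(rproc);
 *	if (ret) {
 *		rproc_put(rproc);
 *		return ret;
 *	}
 */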

/**
 * rproc_put() - unroll rproc_alloc()
 * @rproc: the remote processor handle
 *
 * This function decrements the rproc dev refcount.
 *
 * If no one holds any reference to rproc anymore, then its refcount would
 * now drop to zero, and it would be freed.
 */
void rproc_put(struct rproc *rproc)
{
	put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);

/**
 * rproc_del() - unregister a remote processor
 * @rproc: rproc handle to unregister
 *
 * This function should be called when the platform specific rproc
 * implementation decides to remove the rproc device. It should
 * _only_ be called if a previous invocation of rproc_add()
 * has completed successfully.
 *
 * After rproc_del() returns, @rproc isn't freed yet, because
 * of the outstanding reference created by rproc_alloc. To decrement that
 * one last refcount, one still needs to call rproc_put().
 *
 * Returns 0 on success and -EINVAL if @rproc isn't valid.
 */
int rproc_del(struct rproc *rproc)
{
	struct rproc_vdev *rvdev, *tmp;

	if (!rproc)
		return -EINVAL;

	/* if rproc is just being registered, wait */
	wait_for_completion(&rproc->firmware_loading_complete);

	/* clean up remote vdev entries */
	list_for_each_entry_safe(rvdev, tmp, &rproc->rvdevs, node)
		rproc_remove_virtio_dev(rvdev);

	/* Free the copy of the resource table */
	kfree(rproc->cached_table);

	/* the rproc is downref'ed as soon as it's removed from the klist */
	mutex_lock(&rproc_list_mutex);
	list_del(&rproc->node);
	mutex_unlock(&rproc_list_mutex);

	device_del(&rproc->dev);

	return 0;
}
EXPORT_SYMBOL(rproc_del);

/**
 * rproc_report_crash() - rproc crash reporter function
 * @rproc: remote processor
 * @type: crash type
 *
 * This function must be called every time a crash is detected by the low-level
 * drivers implementing a specific remoteproc. This should not be called from a
 * non-remoteproc driver.
 *
 * This function can be called from atomic/interrupt context.
 */
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
{
	if (!rproc) {
		pr_err("NULL rproc pointer\n");
		return;
	}

	dev_err(&rproc->dev, "crash detected in %s: type %s\n",
		rproc->name, rproc_crash_to_string(type));

	/* create a new task to handle the error */
	schedule_work(&rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
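
/*
 * Illustrative crash report from a low-level driver's interrupt handler
 * (a sketch, not part of this file; the handler name is hypothetical.
 * Calling rproc_report_crash() from atomic context is fine, as noted
 * in the kernel-doc above):
 *
 *	static irqreturn_t my_wdt_isr(int irq, void *data)
 *	{
 *		struct rproc *rproc = data;
 *
 *		rproc_report_crash(rproc, RPROC_WATCHDOG);
 *		return IRQ_HANDLED;
 *	}
 */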

static int __init remoteproc_init(void)
{
	rproc_init_debugfs();

	return 0;
}
module_init(remoteproc_init);

static void __exit remoteproc_exit(void)
{
	ida_destroy(&rproc_dev_index);

	rproc_exit_debugfs();
}
module_exit(remoteproc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic Remote Processor Framework");