// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor (SCP)
 * and the Application Processors (AP). The Message Handling Unit (MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
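
/*
 * For example (illustrative only): a platform status of SCMI_ERR_BUSY (-6)
 * is reported to callers as -EBUSY, while any status outside the mapped
 * range falls back to -EIO.
 */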

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: pointer to the allocated scmi_xfer on success, else an error
 *	pointer encoding the corresponding error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}

/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)
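/* i.e. a 100us busy-wait budget when polling for transfer completion */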

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	xfer->async_done = NULL;
	return ret;
}

/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
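
/*
 * Illustrative sketch only (not part of this driver): a protocol layer built
 * on top of these helpers typically allocates a transfer, fills the Tx
 * payload if any, performs the exchange and releases the slot:
 *
 *	struct scmi_xfer *t;
 *	__le32 *attr;
 *	int ret;
 *
 *	ret = scmi_xfer_get_init(handle, PROTOCOL_ATTRIBUTES, prot_id,
 *				 0, sizeof(*attr), &t);
 *	if (ret)
 *		return ret;
 *
 *	ret = scmi_do_xfer(handle, t);
 *	if (!ret)
 *		attr = t->rx.buf;
 *	scmi_xfer_put(handle, t);
 *
 * PROTOCOL_ATTRIBUTES above stands for whichever command identifier the
 * protocol defines; scmi_version_get() below follows exactly this pattern.
 */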

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released, -EINVAL if a NULL handle was passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
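	/* A single buffer backs both the Tx and Rx views of each transfer */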
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);

	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

#define MAX_SCMI_DEV_PER_PROTOCOL	2
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
	{ SCMI_PROTOCOL_SYSTEM, { "syspower" },},
	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
	{ SCMI_PROTOCOL_RESET,  { "reset" },},
};
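
/*
 * Illustrative note (matching behaviour assumed from the SCMI bus code, not
 * defined in this file): a client registering an scmi_driver whose id_table
 * contains an entry such as { SCMI_PROTOCOL_PERF, "cpufreq" } binds to the
 * "cpufreq" device created for the performance protocol by
 * scmi_create_protocol_devices() below.
 */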

static inline void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int loop, cnt;

	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
		if (devnames[loop].protocol_id != prot_id)
			continue;

		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
			const char *name = devnames[loop].names[cnt];

			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}

void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	scmi_notification_exit(&info->handle);

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);
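
/*
 * The version attributes above are exposed via dev_groups on the SCMI
 * platform device, e.g. (illustration only, the exact path depends on the
 * device tree node name):
 *	cat /sys/devices/platform/<scmi-node>/firmware_version
 */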

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
		   },
	.probe = scmi_probe,
	.remove = scmi_remove,
};

static int __init scmi_driver_init(void)
{
	scmi_bus_init();

	return platform_driver_register(&scmi_driver);
}
module_init(scmi_driver_init);

static void __exit scmi_driver_exit(void)
{
	scmi_bus_exit();

	platform_driver_unregister(&scmi_driver);
}
module_exit(scmi_driver_exit);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");