// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>

#include "common.h"
#include "notify.h"

/* Message IDs of the SCMI Performance protocol commands used here */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
};

/* One operating performance point (OPP) as reported by the platform */
struct scmi_opp {
	u32 perf;		/* performance level index */
	u32 power;		/* power cost at this level */
	u32 trans_latency_us;	/* worst-case transition latency, in us */
};

/* Response payload of PROTOCOL_ATTRIBUTES */
struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
	__le32 stats_addr_low;	/* statistics shared-memory region (lo/hi) */
	__le32 stats_addr_high;
	__le32 stats_size;
};

/* Response payload of PERF_DOMAIN_ATTRIBUTES */
struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	    u8 name[SCMI_MAX_STR_SIZE];
};

/* Request payload of PERF_DESCRIBE_LEVELS */
struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;	/* index of the first OPP to describe */
};

/* Request payload of PERF_LIMITS_SET */
struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

/* Response payload of PERF_LIMITS_GET */
struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

/* Request payload of PERF_LEVEL_SET */
struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

/* Request payload of PERF_NOTIFY_LIMITS / PERF_NOTIFY_LEVEL */
struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;	/* BIT(0) set enables notifications */
};

/* Notification payload delivered with a limits-changed event */
struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};

/* Notification payload delivered with a level-changed event */
struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};

/* Response payload of PERF_DESCRIBE_LEVELS (possibly a partial list) */
struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[];
};

/* Request payload of PERF_DESCRIBE_FASTCHANNEL */
struct scmi_perf_get_fc_info {
	__le32 domain;
	__le32 message_id;	/* command whose fast channel is queried */
};

/* Response payload of PERF_DESCRIBE_FASTCHANNEL */
struct scmi_msg_resp_perf_desc_fc {
	__le32 attr;
#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
	__le32 rate_limit;
	__le32 chan_addr_low;
	__le32 chan_addr_high;
	__le32 chan_size;
	__le32 db_addr_low;
	__le32 db_addr_high;
	__le32 db_set_lmask;
	__le32 db_set_hmask;
	__le32 db_preserve_lmask;
	__le32 db_preserve_hmask;
};

/* Doorbell descriptor for a fast channel */
struct scmi_fc_db_info {
	int width;		/* register width in bytes: 1, 2, 4 or 8 */
	u64 set;		/* bits to set when ringing the doorbell */
	u64 mask;		/* bits to preserve from the current value */
	void __iomem *addr;
};

/* Per-domain fast-channel mappings; entries stay NULL when unavailable */
struct scmi_fc_info {
	void __iomem *level_set_addr;
	void __iomem *limit_set_addr;
	void __iomem *level_get_addr;
	void __iomem *limit_get_addr;
	struct scmi_fc_db_info *level_set_db;
	struct scmi_fc_db_info *limit_set_db;
};

/* Cached attributes and OPP table of one performance domain */
struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
	bool perf_fastchannels;
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	/* Hz per unit of perf level (see scmi_perf_domain_attributes_get) */
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];	/* sorted by ascending perf level */
	struct scmi_fc_info *fc_info;
};

/* Protocol-instance private data, stashed via ph->set_priv() */
struct scmi_perf_info {
	u32 version;
	int num_domains;
	bool power_scale_mw;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;	/* array of num_domains entries */
};

/* Map a notification evt_id (array index) to its enable command */
static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};

/*
 * Query PROTOCOL_ATTRIBUTES and cache the results (domain count, power
 * scale and statistics shared-memory region) in @pi.
 * Returns 0 on success or a negative error code.
 */
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);
		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
		/* Combine the two halves of the 64-bit stats address */
		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

/*
 * Query PERF_DOMAIN_ATTRIBUTES for @domain and fill @dom_info with the
 * supported features, sustained operating point and domain name.
 *
 * mult_factor converts a perf level into a frequency in Hz; when the
 * platform reports no sustained point it defaults to 1000, i.e. levels
 * are then treated as kHz values.
 */
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
				u32 domain, struct perf_dom_info *dom_info)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
				     sizeof(domain), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u32 flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level)
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor =	1000;
		else
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000) /
					dom_info->sustained_perf_level;
		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

/*
 * sort() comparator ordering OPPs by ascending perf level.
 *
 * Note: the previous 't1->perf - t2->perf' computed an unsigned
 * difference and truncated it to int, which can report the wrong sign
 * for widely separated u32 values; compare explicitly instead.
 */
static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	return (t1->perf > t2->perf) - (t1->perf < t2->perf);
}

/*
 * Retrieve the full OPP list of @domain via one or more
 * PERF_DESCRIBE_LEVELS transactions and store it, sorted by ascending
 * perf level, into @perf_dom. At most MAX_OPPS entries are kept.
 */
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
			      struct perf_dom_info *perf_dom)
{
	int ret, cnt;
	u32 tot_opp_cnt = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_opp *opp;
	struct scmi_msg_perf_describe_levels *dom_info;
	struct scmi_msg_resp_perf_describe_levels *level_info;

	ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
				      sizeof(*dom_info), 0, &t);
	if (ret)
		return ret;

	dom_info = t->tx.buf;
	level_info = t->rx.buf;

	do {
		dom_info->domain = cpu_to_le32(domain);
		/* Set the number of OPPs to be skipped/already read */
		dom_info->level_index = cpu_to_le32(tot_opp_cnt);

		ret = ph->xops->do_xfer(ph, t);
		if (ret)
			break;

		num_returned = le16_to_cpu(level_info->num_returned);
		num_remaining = le16_to_cpu(level_info->num_remaining);
		if (tot_opp_cnt + num_returned > MAX_OPPS) {
			dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
			break;
		}

		opp = &perf_dom->opp[tot_opp_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
			opp->power = le32_to_cpu(level_info->opp[cnt].power);
			opp->trans_latency_us = le16_to_cpu
				(level_info->opp[cnt].transition_latency_us);

			dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
				opp->perf, opp->power, opp->trans_latency_us);
		}

		tot_opp_cnt += num_returned;

		/* Restore the full rx buffer size before reusing the xfer */
		ph->xops->reset_rx_to_maxsz(ph, t);
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	perf_dom->opp_count = tot_opp_cnt;
	ph->xops->xfer_put(ph, t);

	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
	return ret;
}

/*
 * Ring a w-bit-wide doorbell: read-modify-write preserving the bits
 * selected by db->mask, then OR in the bits of db->set.
 */
#define SCMI_PERF_FC_RING_DB(w)				\
do {							\
	u##w val = 0;					\
							\
	if (db->mask)					\
		val = ioread##w(db->addr) & db->mask;	\
	iowrite##w((u##w)db->set | val, db->addr);	\
} while (0)

/* Ring the doorbell described by @db, honouring its register width */
static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
{
	if (!db || !db->addr)
		return;

	if (db->width == 1)
		SCMI_PERF_FC_RING_DB(8);
	else if (db->width == 2)
		SCMI_PERF_FC_RING_DB(16);
	else if (db->width == 4)
		SCMI_PERF_FC_RING_DB(32);
	else /* db->width == 8 */
#ifdef CONFIG_64BIT
		SCMI_PERF_FC_RING_DB(64);
#else
	{
		/* 32-bit build: split the 64-bit MMIO via hi-lo accessors */
		u64 val = 0;

		if (db->mask)
			val = ioread64_hi_lo(db->addr) & db->mask;
		iowrite64_hi_lo(db->set | val, db->addr);
	}
#endif
}

/* Set min/max perf limits of @domain via the message-based interface */
static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 max_perf, u32 min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
				      sizeof(*limits), 0, &t);
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

/* Set perf limits, preferring the fast channel when one is mapped */
static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				u32 domain, u32 max_perf, u32 min_perf)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->limit_set_addr) {
		/* Fast channel layout: max at offset 0, min at offset 4 */
		iowrite32(max_perf, dom->fc_info->limit_set_addr);
		iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
		scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
		return 0;
	}

	return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf);
}

/* Read min/max perf limits of @domain via the message-based interface */
static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
				      sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

/* Read perf limits, using the fast channel when one is mapped */
static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				u32 domain, u32 *max_perf, u32 *min_perf)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->limit_get_addr) {
		/* Fast channel layout: max at offset 0, min at offset 4 */
		*max_perf = ioread32(dom->fc_info->limit_get_addr);
		*min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
		return 0;
	}

	return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf);
}

/*
 * Request perf level @level for @domain via the message-based
 * interface. @poll selects polled instead of interrupt completion.
 */
static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
				  u32 domain, u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

/* Set perf level, preferring the fast channel when one is mapped */
static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 level, bool poll)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->level_set_addr) {
		iowrite32(level, dom->fc_info->level_set_addr);
		scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
		return 0;
	}

	return scmi_perf_mb_level_set(ph, domain, level, poll);
}

/* Read the current perf level of @domain via the message interface */
static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
				  u32 domain, u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
				     sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*level = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

/* Read perf level, using the fast channel when one is mapped */
static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 *level, bool poll)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->level_get_addr) {
		*level = ioread32(dom->fc_info->level_get_addr);
		return 0;
	}

	return scmi_perf_mb_level_get(ph, domain, level, poll);
}

/*
 * Enable or disable platform notifications for @domain. @message_id
 * selects either PERF_NOTIFY_LIMITS or PERF_NOTIFY_LEVEL.
 */
static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
					 u32 domain, int message_id,
					 bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_notify_level_or_limits *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->domain = cpu_to_le32(domain);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

531 532 533 534 535 536 537 538 539 540
static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
{
	if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
		return true;
	if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
		return true;
	return false;
}

/*
 * Query PERF_DESCRIBE_FASTCHANNEL for (@domain, @message_id) and, on
 * success, ioremap the channel into *@p_addr. When @p_db is non-NULL
 * and the platform advertises a doorbell, a scmi_fc_db_info is also
 * allocated and published through *@p_db.
 *
 * Failures are silent: the output pointers are left untouched and
 * callers simply fall back to the message-based interface.
 */
static void
scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
			 u32 message_id, void __iomem **p_addr,
			 struct scmi_fc_db_info **p_db)
{
	int ret;
	u32 flags;
	u64 phys_addr;
	u8 size;
	void __iomem *addr;
	struct scmi_xfer *t;
	struct scmi_fc_db_info *db;
	struct scmi_perf_get_fc_info *info;
	struct scmi_msg_resp_perf_desc_fc *resp;

	if (!p_addr)
		return;

	ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				      sizeof(*info), sizeof(*resp), &t);
	if (ret)
		return;

	info = t->tx.buf;
	info->domain = cpu_to_le32(domain);
	info->message_id = cpu_to_le32(message_id);

	ret = ph->xops->do_xfer(ph, t);
	if (ret)
		goto err_xfer;

	resp = t->rx.buf;
	flags = le32_to_cpu(resp->attr);
	size = le32_to_cpu(resp->chan_size);
	/* Reject channel sizes that don't match the command's payload */
	if (!scmi_perf_fc_size_is_valid(message_id, size))
		goto err_xfer;

	phys_addr = le32_to_cpu(resp->chan_addr_low);
	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
	addr = devm_ioremap(ph->dev, phys_addr, size);
	if (!addr)
		goto err_xfer;
	*p_addr = addr;

	if (p_db && SUPPORTS_DOORBELL(flags)) {
		db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
		if (!db)
			goto err_xfer;

		/* Doorbell register width is encoded as a power of two */
		size = 1 << DOORBELL_REG_WIDTH(flags);
		phys_addr = le32_to_cpu(resp->db_addr_low);
		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
		addr = devm_ioremap(ph->dev, phys_addr, size);
		if (!addr)
			goto err_xfer;

		db->addr = addr;
		db->width = size;
		db->set = le32_to_cpu(resp->db_set_lmask);
		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
		db->mask = le32_to_cpu(resp->db_preserve_lmask);
		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
		*p_db = db;
	}
err_xfer:
	ph->xops->xfer_put(ph, t);
}

608
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
609 610 611 612
				     u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

613
	fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
614 615 616
	if (!fc)
		return;

617
	scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
618
				 &fc->level_set_addr, &fc->level_set_db);
619
	scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
620
				 &fc->level_get_addr, NULL);
621
	scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
622
				 &fc->limit_set_addr, &fc->limit_set_db);
623
	scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
624 625 626 627
				 &fc->limit_get_addr, NULL);
	*p_fc = fc;
}

628 629 630 631 632 633 634 635 636 637 638 639
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
	struct of_phandle_args clkspec;

	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
				       0, &clkspec))
		return -EINVAL;

	return clkspec.args[0];
}

/*
 * Register with the OPP core all OPPs of the perf domain bound to
 * @dev. On a partial failure, any OPPs already added are removed
 * before returning the error.
 */
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
				     struct device *dev)
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
	struct scmi_perf_info *pi = ph->get_priv(ph);

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			/* Roll back the OPPs registered so far */
			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}

672 673 674
static int
scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
				 struct device *dev)
675 676
{
	struct perf_dom_info *dom;
677
	struct scmi_perf_info *pi = ph->get_priv(ph);
678 679 680 681 682 683 684 685 686 687
	int domain = scmi_dev_domain_id(dev);

	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;
	/* uS to nS */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

/* Convert @freq (Hz) to a perf level and request it for @domain */
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long freq, bool poll)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
}

/* Read the current perf level of @domain and convert it to Hz */
static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom = pi->dom_info + domain;

	ret = scmi_perf_level_get(ph, domain, &level, poll);
	if (!ret)
		*freq = level * dom->mult_factor;

	return ret;
}

712 713 714 715 716
static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
				   u32 domain, unsigned long *freq,
				   unsigned long *power)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739
	struct perf_dom_info *dom;
	unsigned long opp_freq;
	int idx, ret = -EINVAL;
	struct scmi_opp *opp;

	dom = pi->dom_info + domain;
	if (!dom)
		return -EIO;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		opp_freq = opp->perf * dom->mult_factor;
		if (opp_freq < *freq)
			continue;

		*freq = opp_freq;
		*power = opp->power;
		ret = 0;
		break;
	}

	return ret;
}

740
static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
741 742 743
				      struct device *dev)
{
	struct perf_dom_info *dom;
744
	struct scmi_perf_info *pi = ph->get_priv(ph);
745 746 747 748 749 750

	dom = pi->dom_info + scmi_dev_domain_id(dev);

	return dom->fc_info && dom->fc_info->level_set_addr;
}

751 752 753
static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
754 755 756 757

	return pi->power_scale_mw;
}

/* Performance protocol operations exposed to SCMI driver users */
static const struct scmi_perf_proto_ops perf_proto_ops = {
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
	.fast_switch_possible = scmi_fast_switch_possible,
	.power_scale_mw_get = scmi_power_scale_mw_get,
};

/*
 * Notification-core callback: enable/disable the platform notification
 * matching @evt_id for performance domain @src_id.
 */
static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
					u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

/*
 * Notification-core callback: decode a raw notification payload into
 * the matching report structure. Returns the filled report (setting
 * *src_id to the originating domain) or NULL on a malformed payload.
 */
static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
					  u8 evt_id, ktime_t timestamp,
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	void *rep = NULL;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;

		/* Reject payloads whose size doesn't match the spec */
		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->performance_level = le32_to_cpu(p->performance_level);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	default:
		break;
	}

	return rep;
}

838
static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
839
{
840
	struct scmi_perf_info *pi = ph->get_priv(ph);
841 842 843 844 845 846 847

	if (!pi)
		return -EINVAL;

	return pi->num_domains;
}

/* Descriptors of the notification events this protocol can deliver */
static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};

/* Callbacks used by the SCMI notification core */
static const struct scmi_event_ops perf_event_ops = {
	.get_num_sources = scmi_perf_get_num_sources,
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};

/* Events descriptor handed to the SCMI notification core */
static const struct scmi_protocol_events perf_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &perf_event_ops,
	.evts = perf_events,
	.num_events = ARRAY_SIZE(perf_events),
};

874
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
875 876 877 878 879
{
	int domain;
	u32 version;
	struct scmi_perf_info *pinfo;

880
	ph->xops->version_get(ph, &version);
881

882
	dev_dbg(ph->dev, "Performance Version %d.%d\n",
883 884
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

885
	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
886 887 888
	if (!pinfo)
		return -ENOMEM;

889
	scmi_perf_attributes_get(ph, pinfo);
890

891
	pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
892 893 894 895 896 897 898
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

899 900
		scmi_perf_domain_attributes_get(ph, domain, dom);
		scmi_perf_describe_levels_get(ph, domain, dom);
901 902

		if (dom->perf_fastchannels)
903
			scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
904 905
	}

906
	pinfo->version = version;
907 908

	return ph->set_priv(ph, pinfo);
909 910
}

/* Protocol descriptor registered with the SCMI core */
static const struct scmi_protocol scmi_perf = {
	.id = SCMI_PROTOCOL_PERF,
	.owner = THIS_MODULE,
	.instance_init = &scmi_perf_protocol_init,
	.ops = &perf_proto_ops,
	.events = &perf_protocol_events,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)