// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt

#include <linux/bits.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>

#include "common.h"
#include "notify.h"

/* SCMI Performance protocol message IDs, as defined by the SCMI spec */
enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
};

/* One operating performance point, in CPU-native byte order */
struct scmi_opp {
	u32 perf;		/* abstract performance level index */
	u32 power;		/* power cost reported by the platform */
	u32 trans_latency_us;	/* worst-case transition latency, in us */
};

/* Wire-format response to PROTOCOL_ATTRIBUTES (all fields little-endian) */
struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
	__le32 stats_addr_low;	/* low 32 bits of the statistics region */
	__le32 stats_addr_high;	/* high 32 bits of the statistics region */
	__le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
55
#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	    u8 name[SCMI_MAX_STR_SIZE];
};

/* Request payload for PERF_DESCRIBE_LEVELS */
struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;	/* index of the first OPP to return */
};

/* Request payload for PERF_LIMITS_SET */
struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

/* Response payload for PERF_LIMITS_GET */
struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

/* Request payload for PERF_LEVEL_SET */
struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

/* Request payload for PERF_NOTIFY_LIMITS / PERF_NOTIFY_LEVEL */
struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;	/* BIT(0) set enables the notification */
};

88 89 90 91 92 93 94 95 96 97 98 99 100
struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};

struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};

101 102 103 104 105 106 107 108
struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
109
	} opp[];
110 111
};

112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148
/* Request payload for PERF_DESCRIBE_FASTCHANNEL */
struct scmi_perf_get_fc_info {
	__le32 domain;
	__le32 message_id;	/* which perf command the fastchannel serves */
};

/* Response to PERF_DESCRIBE_FASTCHANNEL (little-endian wire format) */
struct scmi_msg_resp_perf_desc_fc {
	__le32 attr;
#define SUPPORTS_DOORBELL(x)		((x) & BIT(0))
#define DOORBELL_REG_WIDTH(x)		FIELD_GET(GENMASK(2, 1), (x))
	__le32 rate_limit;
	__le32 chan_addr_low;
	__le32 chan_addr_high;
	__le32 chan_size;
	__le32 db_addr_low;
	__le32 db_addr_high;
	__le32 db_set_lmask;
	__le32 db_set_hmask;
	__le32 db_preserve_lmask;
	__le32 db_preserve_hmask;
};

/* Doorbell descriptor for a fastchannel: ring by writing set|preserved bits */
struct scmi_fc_db_info {
	int width;		/* register width in bytes: 1, 2, 4 or 8 */
	u64 set;		/* bits to set when ringing */
	u64 mask;		/* bits to preserve from the current value */
	void __iomem *addr;
};

/* Per-domain fastchannel mappings; NULL entries mean "not supported" */
struct scmi_fc_info {
	void __iomem *level_set_addr;
	void __iomem *limit_set_addr;
	void __iomem *level_get_addr;
	void __iomem *limit_get_addr;
	struct scmi_fc_db_info *level_set_db;
	struct scmi_fc_db_info *limit_set_db;
};

149 150 151 152 153
struct perf_dom_info {
	bool set_limits;
	bool set_perf;
	bool perf_limit_notify;
	bool perf_level_notify;
154
	bool perf_fastchannels;
155 156 157 158 159 160
	u32 opp_count;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	u32 mult_factor;
	char name[SCMI_MAX_STR_SIZE];
	struct scmi_opp opp[MAX_OPPS];
161
	struct scmi_fc_info *fc_info;
162 163 164
};

struct scmi_perf_info {
165
	u32 version;
166 167 168 169 170 171 172
	int num_domains;
	bool power_scale_mw;
	u64 stats_addr;
	u32 stats_size;
	struct perf_dom_info *dom_info;
};

173 174 175 176 177
static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};

178
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
179 180 181 182 183 184
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

185 186
	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(*attr), &t);
187 188 189 190 191
	if (ret)
		return ret;

	attr = t->rx.buf;

192
	ret = ph->xops->do_xfer(ph, t);
193 194 195 196 197 198 199 200 201 202
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);
		pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

203
	ph->xops->xfer_put(ph, t);
204 205 206 207
	return ret;
}

static int
208 209
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
				u32 domain, struct perf_dom_info *dom_info)
210 211 212 213 214
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

215 216
	ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
				     sizeof(domain), sizeof(*attr), &t);
217 218 219
	if (ret)
		return ret;

220
	put_unaligned_le32(domain, t->tx.buf);
221 222
	attr = t->rx.buf;

223
	ret = ph->xops->do_xfer(ph, t);
224 225 226 227 228 229 230
	if (!ret) {
		u32 flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
		dom_info->perf_limit_notify = SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		dom_info->perf_level_notify = SUPPORTS_PERF_LEVEL_NOTIFY(flags);
231
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
232 233 234 235
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
236 237 238 239 240 241 242
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level)
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor =	1000;
		else
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000) /
243
					dom_info->sustained_perf_level;
244
		strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
245 246
	}

247
	ph->xops->xfer_put(ph, t);
248 249 250 251 252 253 254 255 256 257 258
	return ret;
}

/*
 * sort() comparator: ascending by perf level.
 *
 * Use explicit comparisons rather than `t1->perf - t2->perf`: the u32
 * difference truncated to int can have the wrong sign when the values
 * differ by more than INT_MAX.
 */
static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	if (t1->perf < t2->perf)
		return -1;

	return t1->perf > t2->perf;
}

static int
259
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
260 261 262 263 264 265 266 267 268 269
			      struct perf_dom_info *perf_dom)
{
	int ret, cnt;
	u32 tot_opp_cnt = 0;
	u16 num_returned, num_remaining;
	struct scmi_xfer *t;
	struct scmi_opp *opp;
	struct scmi_msg_perf_describe_levels *dom_info;
	struct scmi_msg_resp_perf_describe_levels *level_info;

270 271
	ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
				      sizeof(*dom_info), 0, &t);
272 273 274 275 276 277 278 279 280 281 282
	if (ret)
		return ret;

	dom_info = t->tx.buf;
	level_info = t->rx.buf;

	do {
		dom_info->domain = cpu_to_le32(domain);
		/* Set the number of OPPs to be skipped/already read */
		dom_info->level_index = cpu_to_le32(tot_opp_cnt);

283
		ret = ph->xops->do_xfer(ph, t);
284 285 286 287 288 289
		if (ret)
			break;

		num_returned = le16_to_cpu(level_info->num_returned);
		num_remaining = le16_to_cpu(level_info->num_remaining);
		if (tot_opp_cnt + num_returned > MAX_OPPS) {
290
			dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
291 292 293 294 295 296 297 298 299 300
			break;
		}

		opp = &perf_dom->opp[tot_opp_cnt];
		for (cnt = 0; cnt < num_returned; cnt++, opp++) {
			opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
			opp->power = le32_to_cpu(level_info->opp[cnt].power);
			opp->trans_latency_us = le16_to_cpu
				(level_info->opp[cnt].transition_latency_us);

301
			dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
302 303 304 305
				opp->perf, opp->power, opp->trans_latency_us);
		}

		tot_opp_cnt += num_returned;
306

307
		ph->xops->reset_rx_to_maxsz(ph, t);
308 309 310 311 312 313 314
		/*
		 * check for both returned and remaining to avoid infinite
		 * loop due to buggy firmware
		 */
	} while (num_returned && num_remaining);

	perf_dom->opp_count = tot_opp_cnt;
315
	ph->xops->xfer_put(ph, t);
316 317 318 319 320

	sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
	return ret;
}

321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
#define SCMI_PERF_FC_RING_DB(w)				\
do {							\
	u##w val = 0;					\
							\
	if (db->mask)					\
		val = ioread##w(db->addr) & db->mask;	\
	iowrite##w((u##w)db->set | val, db->addr);	\
} while (0)

static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db)
{
	if (!db || !db->addr)
		return;

	if (db->width == 1)
		SCMI_PERF_FC_RING_DB(8);
	else if (db->width == 2)
		SCMI_PERF_FC_RING_DB(16);
	else if (db->width == 4)
		SCMI_PERF_FC_RING_DB(32);
	else /* db->width == 8 */
#ifdef CONFIG_64BIT
		SCMI_PERF_FC_RING_DB(64);
#else
	{
		u64 val = 0;

		if (db->mask)
			val = ioread64_hi_lo(db->addr) & db->mask;
350
		iowrite64_hi_lo(db->set | val, db->addr);
351 352 353 354
	}
#endif
}

355 356
static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 max_perf, u32 min_perf)
357 358 359 360 361
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

362 363
	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
				      sizeof(*limits), 0, &t);
364 365 366 367 368 369 370 371
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

372
	ret = ph->xops->do_xfer(ph, t);
373

374
	ph->xops->xfer_put(ph, t);
375 376 377
	return ret;
}

378 379
static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				u32 domain, u32 max_perf, u32 min_perf)
380
{
381
	struct scmi_perf_info *pi = ph->get_priv(ph);
382 383 384 385 386 387 388 389 390
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->limit_set_addr) {
		iowrite32(max_perf, dom->fc_info->limit_set_addr);
		iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
		scmi_perf_fc_ring_db(dom->fc_info->limit_set_db);
		return 0;
	}

391
	return scmi_perf_mb_limits_set(ph, domain, max_perf, min_perf);
392 393
}

394 395
static int scmi_perf_mb_limits_get(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 *max_perf, u32 *min_perf)
396 397 398 399 400
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

401 402
	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
				      sizeof(__le32), 0, &t);
403 404 405
	if (ret)
		return ret;

406
	put_unaligned_le32(domain, t->tx.buf);
407

408
	ret = ph->xops->do_xfer(ph, t);
409 410 411 412 413 414 415
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

416
	ph->xops->xfer_put(ph, t);
417 418 419
	return ret;
}

420 421
static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				u32 domain, u32 *max_perf, u32 *min_perf)
422
{
423
	struct scmi_perf_info *pi = ph->get_priv(ph);
424 425 426 427 428 429 430 431
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->limit_get_addr) {
		*max_perf = ioread32(dom->fc_info->limit_get_addr);
		*min_perf = ioread32(dom->fc_info->limit_get_addr + 4);
		return 0;
	}

432
	return scmi_perf_mb_limits_get(ph, domain, max_perf, min_perf);
433 434
}

435 436
static int scmi_perf_mb_level_set(const struct scmi_protocol_handle *ph,
				  u32 domain, u32 level, bool poll)
437 438 439 440 441
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

442
	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
443 444 445
	if (ret)
		return ret;

446
	t->hdr.poll_completion = poll;
447 448 449 450
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

451
	ret = ph->xops->do_xfer(ph, t);
452

453
	ph->xops->xfer_put(ph, t);
454 455 456
	return ret;
}

457 458
static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 level, bool poll)
459
{
460
	struct scmi_perf_info *pi = ph->get_priv(ph);
461 462 463 464 465 466 467 468
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->level_set_addr) {
		iowrite32(level, dom->fc_info->level_set_addr);
		scmi_perf_fc_ring_db(dom->fc_info->level_set_db);
		return 0;
	}

469 470 471 472 473
	return scmi_perf_mb_level_set(ph, domain, level, poll);
}

/* Read the current perf level for @domain via the message transport */
static int scmi_perf_mb_level_get(const struct scmi_protocol_handle *ph,
				  u32 domain, u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
				     sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*level = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

494 495
static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 *level, bool poll)
496
{
497
	struct scmi_perf_info *pi = ph->get_priv(ph);
498 499 500 501 502 503 504
	struct perf_dom_info *dom = pi->dom_info + domain;

	if (dom->fc_info && dom->fc_info->level_get_addr) {
		*level = ioread32(dom->fc_info->level_get_addr);
		return 0;
	}

505 506 507 508
	return scmi_perf_mb_level_get(ph, domain, level, poll);
}

static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
509 510 511 512 513 514 515
					 u32 domain, int message_id,
					 bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_notify_level_or_limits *notify;

516
	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
517 518 519 520 521 522 523
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->domain = cpu_to_le32(domain);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

524
	ret = ph->xops->do_xfer(ph, t);
525

526
	ph->xops->xfer_put(ph, t);
527 528 529
	return ret;
}

530 531 532 533 534 535 536 537 538 539
static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size)
{
	if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4)
		return true;
	if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8)
		return true;
	return false;
}

static void
540
scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain,
541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556
			 u32 message_id, void __iomem **p_addr,
			 struct scmi_fc_db_info **p_db)
{
	int ret;
	u32 flags;
	u64 phys_addr;
	u8 size;
	void __iomem *addr;
	struct scmi_xfer *t;
	struct scmi_fc_db_info *db;
	struct scmi_perf_get_fc_info *info;
	struct scmi_msg_resp_perf_desc_fc *resp;

	if (!p_addr)
		return;

557 558
	ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				      sizeof(*info), sizeof(*resp), &t);
559 560 561 562 563 564 565
	if (ret)
		return;

	info = t->tx.buf;
	info->domain = cpu_to_le32(domain);
	info->message_id = cpu_to_le32(message_id);

566
	ret = ph->xops->do_xfer(ph, t);
567 568 569 570 571 572 573 574 575 576 577
	if (ret)
		goto err_xfer;

	resp = t->rx.buf;
	flags = le32_to_cpu(resp->attr);
	size = le32_to_cpu(resp->chan_size);
	if (!scmi_perf_fc_size_is_valid(message_id, size))
		goto err_xfer;

	phys_addr = le32_to_cpu(resp->chan_addr_low);
	phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
578
	addr = devm_ioremap(ph->dev, phys_addr, size);
579 580 581 582 583
	if (!addr)
		goto err_xfer;
	*p_addr = addr;

	if (p_db && SUPPORTS_DOORBELL(flags)) {
584
		db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
585 586 587 588 589 590
		if (!db)
			goto err_xfer;

		size = 1 << DOORBELL_REG_WIDTH(flags);
		phys_addr = le32_to_cpu(resp->db_addr_low);
		phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
591
		addr = devm_ioremap(ph->dev, phys_addr, size);
592 593 594 595 596 597 598 599 600 601 602 603
		if (!addr)
			goto err_xfer;

		db->addr = addr;
		db->width = size;
		db->set = le32_to_cpu(resp->db_set_lmask);
		db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
		db->mask = le32_to_cpu(resp->db_preserve_lmask);
		db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
		*p_db = db;
	}
err_xfer:
604
	ph->xops->xfer_put(ph, t);
605 606
}

607
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
608 609 610 611
				     u32 domain, struct scmi_fc_info **p_fc)
{
	struct scmi_fc_info *fc;

612
	fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL);
613 614 615
	if (!fc)
		return;

616
	scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET,
617
				 &fc->level_set_addr, &fc->level_set_db);
618
	scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET,
619
				 &fc->level_get_addr, NULL);
620
	scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET,
621
				 &fc->limit_set_addr, &fc->limit_set_db);
622
	scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET,
623 624 625 626
				 &fc->limit_get_addr, NULL);
	*p_fc = fc;
}

627 628 629 630 631 632 633 634 635 636 637 638
/* Device specific ops */
static int scmi_dev_domain_id(struct device *dev)
{
	struct of_phandle_args clkspec;

	if (of_parse_phandle_with_args(dev->of_node, "clocks", "#clock-cells",
				       0, &clkspec))
		return -EINVAL;

	return clkspec.args[0];
}

639
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
640
				     struct device *dev)
641 642 643 644 645
{
	int idx, ret, domain;
	unsigned long freq;
	struct scmi_opp *opp;
	struct perf_dom_info *dom;
646
	struct scmi_perf_info *pi = ph->get_priv(ph);
647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670

	domain = scmi_dev_domain_id(dev);
	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		freq = opp->perf * dom->mult_factor;

		ret = dev_pm_opp_add(dev, freq, 0);
		if (ret) {
			dev_warn(dev, "failed to add opp %luHz\n", freq);

			while (idx-- > 0) {
				freq = (--opp)->perf * dom->mult_factor;
				dev_pm_opp_remove(dev, freq);
			}
			return ret;
		}
	}
	return 0;
}

671 672 673
static int
scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
				 struct device *dev)
674 675
{
	struct perf_dom_info *dom;
676
	struct scmi_perf_info *pi = ph->get_priv(ph);
677 678 679 680 681 682 683 684 685 686
	int domain = scmi_dev_domain_id(dev);

	if (domain < 0)
		return domain;

	dom = pi->dom_info + domain;
	/* uS to nS */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

687
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
688
			      unsigned long freq, bool poll)
689
{
690
	struct scmi_perf_info *pi = ph->get_priv(ph);
691 692
	struct perf_dom_info *dom = pi->dom_info + domain;

693
	return scmi_perf_level_set(ph, domain, freq / dom->mult_factor, poll);
694 695
}

696
static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
697
			      unsigned long *freq, bool poll)
698 699 700
{
	int ret;
	u32 level;
701
	struct scmi_perf_info *pi = ph->get_priv(ph);
702 703
	struct perf_dom_info *dom = pi->dom_info + domain;

704
	ret = scmi_perf_level_get(ph, domain, &level, poll);
705 706 707 708 709 710
	if (!ret)
		*freq = level * dom->mult_factor;

	return ret;
}

711 712 713 714 715
static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
				   u32 domain, unsigned long *freq,
				   unsigned long *power)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738
	struct perf_dom_info *dom;
	unsigned long opp_freq;
	int idx, ret = -EINVAL;
	struct scmi_opp *opp;

	dom = pi->dom_info + domain;
	if (!dom)
		return -EIO;

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		opp_freq = opp->perf * dom->mult_factor;
		if (opp_freq < *freq)
			continue;

		*freq = opp_freq;
		*power = opp->power;
		ret = 0;
		break;
	}

	return ret;
}

739
static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
740 741 742
				      struct device *dev)
{
	struct perf_dom_info *dom;
743
	struct scmi_perf_info *pi = ph->get_priv(ph);
744 745 746 747 748 749

	dom = pi->dom_info + scmi_dev_domain_id(dev);

	return dom->fc_info && dom->fc_info->level_set_addr;
}

750 751 752
static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
753 754 755 756

	return pi->power_scale_mw;
}

757
static const struct scmi_perf_proto_ops perf_proto_ops = {
758 759 760 761 762
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.device_domain_id = scmi_dev_domain_id,
763 764
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
765 766
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
767
	.est_power_get = scmi_dvfs_est_power_get,
768
	.fast_switch_possible = scmi_fast_switch_possible,
769
	.power_scale_mw_get = scmi_power_scale_mw_get,
770 771
};

772
static int scmi_perf_set_notify_enabled(const void *ph,
773 774 775 776 777 778 779 780
					u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
781
	ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
782 783 784 785 786 787 788
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

789
static void *scmi_perf_fill_custom_report(const void *ph,
790
					  u8 evt_id, ktime_t timestamp,
791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	void *rep = NULL;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->performance_level = le32_to_cpu(p->performance_level);
		*src_id = r->domain_id;
		rep = r;
		break;
	}
	default:
		break;
	}

	return rep;
}

837
static int scmi_perf_get_num_sources(const void *ph)
838
{
839
	struct scmi_perf_info *pi =
840
		((const struct scmi_protocol_handle *)ph)->get_priv(ph);
841 842 843 844 845 846 847

	if (!pi)
		return -EINVAL;

	return pi->num_domains;
}

848 849 850 851 852 853 854 855 856 857 858 859 860 861
static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};

static const struct scmi_event_ops perf_event_ops = {
862
	.get_num_sources = scmi_perf_get_num_sources,
863 864 865 866
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};

867 868 869 870 871 872 873
static const struct scmi_protocol_events perf_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &perf_event_ops,
	.evts = perf_events,
	.num_events = ARRAY_SIZE(perf_events),
};

874
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
875 876 877 878 879
{
	int domain;
	u32 version;
	struct scmi_perf_info *pinfo;

880
	ph->xops->version_get(ph, &version);
881

882
	dev_dbg(ph->dev, "Performance Version %d.%d\n",
883 884
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

885
	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
886 887 888
	if (!pinfo)
		return -ENOMEM;

889
	scmi_perf_attributes_get(ph, pinfo);
890

891
	pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
892 893 894 895 896 897 898
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

899 900
		scmi_perf_domain_attributes_get(ph, domain, dom);
		scmi_perf_describe_levels_get(ph, domain, dom);
901 902

		if (dom->perf_fastchannels)
903
			scmi_perf_domain_init_fc(ph, domain, &dom->fc_info);
904 905
	}

906
	pinfo->version = version;
907 908

	return ph->set_priv(ph, pinfo);
909 910
}

911 912
static const struct scmi_protocol scmi_perf = {
	.id = SCMI_PROTOCOL_PERF,
913 914
	.instance_init = &scmi_perf_protocol_init,
	.ops = &perf_proto_ops,
915
	.events = &perf_protocol_events,
916 917 918
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)