/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request. (Can provide min and max bounds)
 *
 * - Platform (such as BMC) is free to optimize request within requested bounds
 *   depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic mailbox-like
 * mechanism which includes doorbell semantics to indicate register updates.
 * See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1 and
 * above specifications.
 */
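
/*
 * A minimal usage sketch (illustrative only, not part of this file's build):
 * a CPPC cpufreq driver combines the helpers exported below roughly as
 * follows, where "cpu" is a logical CPU id:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 */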

#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
	struct mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	ktime_t deadline;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical usecases (currently cppc_set_perf):
	 *	We need to take the read_lock and check if the channel belongs
	 * to OSPM before reading or writing to the PCC subspace.
	 *	We need to take the write_lock before transferring channel
	 * ownership to the platform via a doorbell.
	 *	This allows us to batch a number of CPPC requests if they happen
	 * to originate at about the same time.
	 *
	 * For non-performance-critical usecases (e.g. init):
	 *	Take the write_lock for all purposes, which gives exclusive
	 * access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};

/* Array to represent the PCC channel per subspace id */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains the per-CPU subspace id */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/*
 * pcc mapped address + header size + offset within PCC subspace:
 * the 0x8 skips the generic PCC shared memory region header (a 4-byte
 * signature followed by 2-byte command and 2-byte status fields).
 */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
						0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id ==  ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

struct cppc_attr {
	struct attribute attr;
	ssize_t (*show)(struct kobject *kobj,
			struct attribute *attr, char *buf);
	ssize_t (*store)(struct kobject *kobj,
			struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)		\
static struct cppc_attr _name =			\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
					struct attribute *attr,	char *buf) \
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
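
/*
 * These generated show_*() handlers back the per-CPU sysfs nodes created
 * in acpi_cppc_processor_probe() below; on a typical system they appear
 * as, e.g.:
 *
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 *	/sys/devices/system/cpu/cpu0/acpi_cppc/wraparound_time
 */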

static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	NULL
};

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = cppc_attrs,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret = -EIO, status = 0;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	ktime_t next_deadline = ktime_add(ktime_get(),
					  pcc_ss_data->deadline);

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/* Retry in case the remote processor was too slow to catch up. */
	while (!ktime_after(ktime_get(), next_deadline)) {
		/*
		 * Per spec, prior to boot the PCC space will be initialized by
		 * the platform and should have set the command completion bit
		 * when PCC can be used by OSPM.
		 */
		status = readw_relaxed(&generic_comm_base->status);
		if (status & PCC_CMD_COMPLETE_MASK) {
			ret = 0;
			if (chk_err_bit && (status & PCC_ERROR_MASK))
				ret = -EIO;
			break;
		}
		/*
		 * Reducing the bus traffic in case this loop takes longer than
		 * a few retries.
		 */
		udelay(3);
	}

	if (likely(!ret))
		pcc_ss_data->platform_owns_pcc = false;
	else
		pr_err("PCC check channel failed for ss: %d. Status=%x\n",
		       pcc_ss_id, status);

	return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock(pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space.
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * the platform.
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time (MRTT):
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate (MPAR):
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should ideally be zero or large enough that it can
	 * handle the maximum number of requests that all the cores in the
	 * system can collectively generate. If it is not, we will follow the
	 * spec and simply not send the request to the platform after hitting
	 * the MPAR limit in any 60s window.
	 */
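	/*
	 * Worked example (illustrative): with pcc_mpar == 600, at most 600
	 * commands may be issued in any 60 s window, i.e. 10 commands per
	 * second on average; once the limit is hit, further requests in that
	 * window fail below with -EIO.
	 */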
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* Wait for completion and check for the PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
				*(u16 *)msg, ret);
}

struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object  *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
		&format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
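
/*
 * For reference (illustrative ASL with hypothetical values): a _PSD object
 * groups CPUs into a coordination domain, e.g.
 *
 *	Name(_PSD, Package()
 *	{
 *		// NumEntries, Revision, Domain, CoordType, NumProcessors
 *		Package() {5, 0, 0, 0xFD, 4}	// 0xFD == SW_ANY
 *	})
 */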

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
 *
 *	Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct cppc_cpudata *pr, *match_pr;
	struct acpi_psd_package *pdomain;
	struct acpi_psd_package *match_pdomain;
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Now that we have _PSD data from all CPUs, lets setup P-state
	 * domain info.
	 */
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!cpc_ptr) {
			retval = -EFAULT;
			goto err_ret;
		}

		pdomain = &(cpc_ptr->domain_info);
		cpumask_set_cpu(i, pr->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */
			if (match_pdomain->num_processors != count_target) {
				retval = -EFAULT;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EFAULT;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = all_cpu_data[j];
			if (!match_pr)
				continue;

			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
			if (!match_cpc_ptr) {
				retval = -EFAULT;
				goto err_ret;
			}

			match_pdomain = &(match_cpc_ptr->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->shared_type = pr->shared_type;
			cpumask_copy(match_pr->shared_cpu_map,
				     pr->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = all_cpu_data[i];
		if (!pr)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->shared_cpu_map);
			cpumask_set_cpu(i, pr->shared_cpu_map);
			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		}
	}

	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
	struct acpi_pcct_hw_reduced *cppc_ss;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_data[pcc_ss_idx]->pcc_channel =
			pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * The PCC mailbox controller driver should
		 * have parsed the PCCT (global table of all
		 * PCC channels) and stored pointers to the
		 * subspace communication region in con_priv.
		 */
		cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;

		if (!cppc_ss) {
			pr_err("No PCC subspace found for %d CPPC\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		/*
		 * cppc_ss->latency is just a nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of nominal.
		 */
		usecs_lat = NUM_RETRIES * cppc_ss->latency;
		pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
		pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}
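
/*
 * Worked example (illustrative): if the PCCT reports a nominal latency of
 * 100 us, the deadline computed above is NUM_RETRIES * 100 us = 50 ms of
 * total wait in check_pcc_chan() before giving up on the platform.
 */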

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}


/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}
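
/*
 * Illustrative example (hypothetical topology): if two SMT siblings, CPU0
 * and CPU1, both encode PCC subspace 3 in their _CPC entries, the first
 * pcc_data_alloc(3) call allocates pcc_data[3] and the second only bumps
 * its refcount to 2; acpi_cppc_processor_exit() frees the structure once
 * the refcount drops back to zero.
 */
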
/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *		{
 *		17,							// NumEntries
 *		1,							// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *		..
 *		..
 *		}
 *	)
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,		// AddressSpaceKeyword
 *		8,		// RegisterBitWidth
 *		8,		// RegisterBitOffset
 *		0x30,		// RegisterAddress
 *		9		// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 *	Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -EFAULT;

	/* Parse the ACPI _CPC table for this cpu. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
			ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
				cpc_obj->type);
		goto out_free;
	}

	/* Only support CPPCv2. Bail otherwise. */
	if (num_ent != CPPC_NUM_ENT) {
		pr_debug("Firmware exports %d entries. Expected: %d\n",
				num_ent, CPPC_NUM_ENT);
		goto out_free;
	}

	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
				cpc_obj->type);
		goto out_free;
	}

	if (cpc_rev != CPPC_REV) {
		pr_debug("Firmware exports revision:%d. Expected:%d\n",
				cpc_rev, CPPC_REV);
		goto out_free;
	}

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER)	{
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SYS MEM and FFH type regs */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for each PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
			"acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		goto out_free;
	}

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
				kfree(pcc_data[pcc_ss_id]);
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	cpu number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	cpu number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code otherwise
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}

/*
 * Since cpc_read() and cpc_write() are called while holding pcc_lock, they
 * should be as fast as possible. We have already mapped the PCC subspace
 * during init, so we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return ret_val;
	}

	*val = 0;
	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
	}

	return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
				val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg;
	u64 high, low, nom, min_nonlinear;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
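
/*
 * Example (illustrative sketch; use_perf_range() is a hypothetical
 * consumer): a caller can sanity-check the returned capabilities before
 * using them:
 *
 *	struct cppc_perf_caps caps;
 *
 *	if (!cppc_get_perf_caps(cpu, &caps) &&
 *	    caps.lowest_perf <= caps.nominal_perf &&
 *	    caps.nominal_perf <= caps.highest_perf)
 *		use_perf_range(caps.lowest_perf, caps.highest_perf);
 */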

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If the reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC? */
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if the ctr_wrap_time optional register is unsupported,
	 * then the performance counters are assumed to never wrap during the
	 * lifetime of the platform.
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
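
/*
 * Illustrative sketch (not part of this file): a governor can estimate the
 * average delivered performance over an interval from two counter snapshots
 * t0 and t1, scaling reference performance by the counter deltas:
 *
 *	struct cppc_perf_fb_ctrs t0, t1;
 *	u64 delta_delivered, delta_reference, perf;
 *
 *	cppc_get_perf_ctrs(cpu, &t0);
 *	// ... workload runs for the sampling interval ...
 *	cppc_get_perf_ctrs(cpu, &t1);
 *	delta_delivered = t1.delivered - t0.delivered;
 *	delta_reference = t1.reference - t0.reference;
 *	if (delta_reference)	// avoid division by zero
 *		perf = t0.reference_perf * delta_delivered / delta_reference;
 */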

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data;
	int ret = 0;

	if (!cpc_desc || pcc_ss_id < 0) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	pcc_ss_data = pcc_data[pcc_ss_id];
	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
		 * will not arrive and steal the channel during the switch to
		 * the write lock.
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to the
	 * platform.
	 *
	 * Short summary: think of a group of cppc_set_perf requests that
	 * happened over a short overlapping interval. The last CPU to come
	 * out of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I.
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact that the other CPU acquiring the write_lock couldn't have
	 * happened before this CPU's Phase-I, as we held the read_lock.
	 *     3. Some other CPU executing a PCC CMD_READ has stolen the
	 * down_write, in which case send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * the thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to the platform by triggering the
	 * doorbell and transferred the ownership of PCC to the platform. So
	 * this avoids triggering an unnecessary doorbell and, more importantly,
	 * before triggering the doorbell it makes sure that the PCC channel
	 * ownership is still with OSPM.
	 *   pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a PCC CMD_READ waiting on down_write and it steals the lock
	 * before the PCC CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and, if there are pending writes, it delivers
	 * the write command before servicing the read command.
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) { /* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);	/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values
	 * Below are definition from ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
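	/*
	 * Worked example (illustrative): with pcc_mpar == 600 commands per
	 * minute, the MPAR-derived bound below is
	 * 60 * (1000000000 / 600) = 100000000 ns, i.e. one perf change
	 * request per 100 ms on average.
	 */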
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);