/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

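/*
 * Illustrative note (not in the original source): with FRAC_BITS = 8 the
 * driver works in 24.8 fixed point, so int_tofp(1) == 256 and a raw value
 * of 384 represents 1.5.  The extended format (EXT_FRAC_BITS = 14) makes
 * int_ext_tofp(1) == 16384, giving the performance-limit fractions finer
 * resolution.
 */
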
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}

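/*
 * Illustrative usage (assumed sample numbers): core_avg_perf is computed
 * as div_ext_fp(aperf, mperf).  For an APERF delta of 1500000 and an MPERF
 * delta of 2000000, (1500000 << 14) / 2000000 == 12288, i.e. 0.75 in
 * extended fixed point, and mul_ext_fp(12288, 3200000) == 2400000 scales
 * a 3.2 GHz reference back down to the 2.4 GHz actually delivered.
 */
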
/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for cpu idle period
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical:This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling)
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage, this
 *			is the minimum of the limits enforced by the cpufreq
 *			policy and the user-set limits via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage, this
 *			is the maximum of the limits enforced by the cpufreq
 *			policy and the user-set limits via intel_pstate sysfs
 * @max_perf:		This is a scaled fixed-point value for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled fixed-point value for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for last sample data
 * @perf_limits:	Pointer to perf_limit unique to this CPU
 *			Not all fields in the structure are applicable
 *			when per cpu controls are enforced
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when policy switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
 *			operation
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool   update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_update;
	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_saved;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults - Per CPU model default config data
 * @pid_policy:		PID config data
 * @funcs:		Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_ext_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active)
		return;

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue as
	 * usual without taking _PSS into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range.
	 * It just contains +1 MHz above the max non turbo frequency,
	 * with a control value corresponding to the max turbo ratio. But
	 * when cpufreq set_policy is called, it will be called with this
	 * max frequency, which will cause reduced performance, as this
	 * driver uses the real max turbo frequency as the max frequency.
	 * So correct this frequency in the _PSS table to the correct max
	 * turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = int_tofp(setpoint);
	pid->deadband  = int_tofp(deadband);
	pid->integral  = int_tofp(integral);
	pid->last_err  = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

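/*
 * Worked example for pid_calc() above (illustrative, using the Core
 * defaults defined later: setpoint 97, p_gain_pct 20, i/d gains 0,
 * deadband 0): for busy = int_tofp(92), fp_error = int_tofp(5) = 1280 and
 * p_gain = div_fp(20, 100) = 51, so pterm = mul_fp(51, 1280) = 255.  With
 * zero i and d terms, result = 255 + 128 (rounding) = 383 and the function
 * returns fp_toint(383) = 1, i.e. request one P state below the current one.
 */
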
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, it means that the caller didn't
		 * read MSR_HWP_REQUEST, so read it here to get EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
{
	s16 epp;
	int index = -EINVAL;

	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * Range:
		 *	0x00-0x3F	:	Performance
		 *	0x40-0x7F	:	Balance performance
		 *	0x80-0xBF	:	Balance power
		 *	0xC0-0xFF	:	Power
		 * The EPP is an 8 bit value, but our ranges restrict the
		 * value which can be set. Here we are effectively only
		 * using the top two bits.
		 */
		index = (epp >> 6) + 1;
	} else if (static_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * value which can be set. Here we are effectively only
		 * using the top two bits.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}

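/*
 * Illustrative mapping (assumed values): an HWP EPP of 0x80 lies in the
 * balance_power range, and (0x80 >> 6) + 1 == 3 indexes "balance_power"
 * in energy_perf_strings[]; an EPB of 0x06 gives (0x06 >> 2) + 1 == 2,
 * i.e. "balance_performance".
 */
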
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	mutex_lock(&intel_pstate_limits_lock);

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		u64 value;

		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
		if (ret)
			goto return_pref;

		value &= ~GENMASK_ULL(31, 24);

		/*
		 * If epp is not default, convert from the index into
		 * energy_perf_strings to an epp value, by shifting 6
		 * bits left to use only the top two bits in epp.
		 * The resulting epp needs to be shifted by 24 bits to
		 * the epp position in MSR_HWP_REQUEST.
		 */
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 6;

		value |= (u64)epp << 24;
		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}
return_pref:
	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	char str_preference[21];
	int ret, i = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	while (energy_perf_strings[i] != NULL) {
		if (!strcmp(str_preference, energy_perf_strings[i])) {
			intel_pstate_set_energy_pref_index(cpu_data, i);
			return count;
		}
		++i;
	}

	return -EINVAL;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference;

	preference = intel_pstate_get_energy_pref_index(cpu_data);
	if (preference < 0)
		return preference;

	return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, cpumask) {
		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

		if (per_cpu_limits)
			perf_limits = all_cpu_data[cpu]->perf_limits;

		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

		if (cpu_data->epp_policy == cpu_data->policy)
			goto skip_epp;

		cpu_data->epp_policy = cpu_data->policy;

		if (cpu_data->epp_saved >= 0) {
			epp = cpu_data->epp_saved;
			cpu_data->epp_saved = -EINVAL;
			goto update_epp;
		}

		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
			epp = intel_pstate_get_epp(cpu_data, value);
			cpu_data->epp_powersave = epp;
			/* If the EPP read failed, then don't try to write */
			if (epp < 0)
				goto skip_epp;

			epp = 0;
		} else {
			/* skip setting EPP, when saved value is invalid */
			if (cpu_data->epp_powersave < 0)
				goto skip_epp;

			/*
			 * No need to restore EPP when it is not zero. This
			 * means:
			 *  - Policy is not changed
			 *  - the user has manually changed it
			 *  - Error reading EPB
			 */
			epp = intel_pstate_get_epp(cpu_data, value);
			if (epp)
				goto skip_epp;

			epp = cpu_data->epp_powersave;
		}
update_epp:
		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
			value &= ~GENMASK_ULL(31, 24);
			value |= (u64)epp << 24;
		} else {
			intel_pstate_set_epb(cpu, epp);
		}
skip_epp:
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

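/*
 * Illustrative example of the percent-to-HWP mapping above (assumed
 * capabilities): if HWP_CAPABILITIES reports hw_min = 8 and hw_max = 36,
 * range = 28.  A min_perf_pct of 25 gives adj_range = 25 * 28 / 100 = 7,
 * so HWP_MIN_PERF becomes 8 + 7 = 15, while a max_perf_pct of 100 keeps
 * HWP_MAX_PERF at the full 36.
 */
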
static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];

	if (!hwp_active)
		return 0;

	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	int ret;

	if (!hwp_active)
		return 0;

	mutex_lock(&intel_pstate_limits_lock);

	all_cpu_data[policy->cpu]->epp_policy = 0;

	ret = intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	mutex_unlock(&intel_pstate_limits_lock);

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	mutex_unlock(&intel_pstate_limits_lock);

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	mutex_unlock(&intel_pstate_limits_lock);

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per cpu limits are enforced there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupt as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	cpudata->epp_policy = 0;
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

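/*
 * Illustrative interpolation for atom_get_val() above (assumed VID data):
 * with vid.min = int_tofp(0x20), vid.max = int_tofp(0x40), min_pstate = 8
 * and max_pstate = 24, vid.ratio = div_fp(int_tofp(0x20), int_tofp(16)) =
 * int_tofp(2), so a request 4 P states above the minimum picks
 * vid = ceiling_fp(int_tofp(0x20) + mul_fp(int_tofp(4), int_tofp(2))) = 0x28.
 */
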
static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}

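/*
 * Illustrative conversion (assumed numbers): the scaling factor turns a
 * P state ratio into a cpufreq frequency in kHz.  On a Silvermont part
 * whose MSR_FSB_FREQ field selects the 83300 entry, a P state of 20 maps
 * to 20 * 83300 = 1666000 kHz (about 1.67 GHz); Core parts use a fixed
 * 100000 kHz (100 MHz) multiplier instead.
 */
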
static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

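/*
 * Illustrative encoding: core_get_val() above places the requested ratio
 * in bits 15:8 of IA32_PERF_CTL, so a P state of 0x1c is written as
 * 0x1c00; bit 32 (turbo disengage) is additionally set when the user has
 * requested no_turbo on a platform where turbo is otherwise available.
 */
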
static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	struct perf_limits *perf_limits = limits;

	if (limits->no_turbo || limits->turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}

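/*
 * Illustrative limit scaling (assumed numbers): with turbo_pstate = 36 and
 * max_perf_pct = 50, max_perf = div_ext_fp(50, 100) = 8192 (0.5 in
 * extended fixed point), so fp_ext_toint(36 * 8192) = 18 becomes the
 * clamped maximum P state handed back by intel_pstate_get_min_max().
 */
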
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int min_pstate, max_pstate;

	update_turbo_state();
	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	intel_pstate_set_pstate(cpu, max_pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc =  tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

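/*
 * Illustrative example (assumed sample): if core_avg_perf is 0.75 in
 * extended fixed point (12288) on a CPU with max_pstate_physical = 32 and
 * scaling = 100000, get_avg_pstate() returns mul_ext_fp(32, 12288) = 24
 * and get_avg_frequency() reports 2400000 kHz, i.e. 2.4 GHz.
 */
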
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}

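/*
 * Illustrative target computation for the load algorithm above (assumed
 * numbers): with turbo available and turbo_pstate = 32, the boosted base
 * is 32 + (32 >> 2) = 40.  A busy_frac of 0.5 (128 in 24.8 fixed point)
 * gives mul_fp(40, 128) = 20; if the previous cycle averaged P state 24,
 * half the difference is added back for a final target of 22.
 */
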
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
			       div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

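/*
 * Illustrative example (assumed sample): with max_pstate_physical = 32,
 * current_pstate = 24 and core_avg_perf = 0.75 (12288), the core averaged
 * 0.75 * 32 = 24, i.e. exactly the requested P state, so
 * perf_scaled = mul_ext_fp(12288, div_fp(3200, 24)) comes out at roughly
 * int_tofp(100) (100% in 24.8 fixed point) before the idle-time scaling.
 */
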
static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	return pstate;
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	pstate = intel_pstate_prepare_request(cpu, pstate);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

	update_turbo_state();

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

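/*
 * Illustrative boost decay: a SCHED_CPUFREQ_IOWAIT event sets iowait_boost
 * to int_tofp(1) (a busy fraction of 1.0), which acts as a floor for
 * busy_frac in the load algorithm; the value then halves on every sample
 * (256, 128, 64, ...) and is dropped entirely if the CPU stays idle for
 * more than a tick.
 */
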
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE, 		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		unsigned int size = sizeof(struct cpudata);

		if (per_cpu_limits)
			size += sizeof(struct perf_limits);

		cpu = kzalloc(size, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;
		if (per_cpu_limits)
			cpu->perf_limits = (struct perf_limits *)(cpu + 1);

		cpu->epp_default = -EINVAL;
		cpu->epp_powersave = -EINVAL;
		cpu->epp_saved = -EINVAL;
	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_ext_tofp(1);
	limits->min_perf_pct = 100;
	limits->min_perf = int_ext_tofp(1);
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}

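/*
 * Translate the policy's min/max frequencies into percentages of the
 * maximum supported frequency, clamp them against the sysfs limits and
 * convert the result to extended fixed-point ratios.  For example, with
 * cpuinfo.max_freq = 3200000 and policy->max = 2400000, max_policy_pct
 * becomes DIV_ROUND_UP(2400000 * 100, 3200000) = 75.
 */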
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
	} else {
		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
						      policy->cpuinfo.max_freq);
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
}

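/*
 * cpufreq ->setpolicy callback.  Picks the performance or powersave limits
 * set (global or per-CPU), recomputes the percentage limits for the new
 * policy and re-arms the utilization update hook, all under
 * intel_pstate_limits_lock.
 */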
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	struct perf_limits *perf_limits = NULL;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	mutex_lock(&intel_pstate_limits_lock);

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		if (!perf_limits) {
			limits = &performance_limits;
			perf_limits = limits;
		}
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(perf_limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		if (!perf_limits) {
			limits = &powersave_limits;
			perf_limits = limits;
		}
	}

	intel_pstate_update_perf_limits(policy, perf_limits);
 out:
	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	}

	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (hwp_active)
		intel_pstate_hwp_save_state(policy);
	else
		intel_cpufreq_stop_cpu(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	/*
	 * We need a sane value in cpu->perf_limits, so inherit it from the
	 * global limits, which are seeded during boot based on
	 * CONFIG_CPU_FREQ_DEFAULT_GOV_*.
	 */
	if (per_cpu_limits)
		memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	cpumask_set_cpu(policy->cpu, policy->cpus);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_hwp_save_state,
	.resume		= intel_pstate_resume,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

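/*
 * Everything below implements the "passive" operation mode, in which
 * intel_pstate acts as an ordinary cpufreq scaling driver (intel_cpufreq):
 * target frequencies are chosen by a generic governor such as schedutil
 * and merely translated into P-state requests here.
 */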
static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct perf_limits *perf_limits = limits;

	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	cpufreq_verify_within_cpu_limits(policy);

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	intel_pstate_update_perf_limits(policy, perf_limits);

	return 0;
}

static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
					       struct cpufreq_policy *policy,
					       unsigned int target_freq)
{
	unsigned int max_freq;

	update_turbo_state();

	max_freq = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	policy->cpuinfo.max_freq = max_freq;
	if (policy->max > max_freq)
		policy->max = max_freq;

	if (target_freq > max_freq)
		target_freq = max_freq;

	return target_freq;
}

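/*
 * The relation argument decides how a target frequency that falls between
 * two P-states is rounded: RELATION_L rounds up to stay at or above the
 * request, RELATION_H rounds down to stay at or below it, and anything
 * else picks the closest P-state.
 */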
static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	freqs.old = policy->cur;
	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

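/*
 * Fast path used by governors that support fast frequency switching: the
 * target is clamped to the current turbo limits and programmed directly,
 * without issuing cpufreq transition notifications.
 */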
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_freq;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_cpufreq_stop_cpu,
	.name		= "intel_cpufreq",
};

static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

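/*
 * Sanity-check that the P-state MSRs report usable values; a zero from any
 * of these reads means the interface is not functional on this CPU.
 */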
static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

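/*
 * Import the CPU model's PID tuning (gains, deadband, setpoint and sample
 * rate) into the live pid_params used by the busy-PID controller.
 */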
static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

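/*
 * On platforms whose ACPI FADT advertises the "mobile" preferred PM
 * profile, the CPU-load based target selection is used instead of the
 * default PID algorithm.
 */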
#ifdef CONFIG_ACPI
static void intel_pstate_use_acpi_profile(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
		pstate_funcs.get_target_pstate =
				get_target_pstate_use_cpu_load;
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val   = funcs->get_val;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendors whose platforms have their own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

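/*
 * Decide whether the platform, rather than the OS, owns P-state control:
 * either firmware has set the out-of-band bit (bit 8) in MSR_MISC_PWR_MGMT
 * on a known OOB CPU, or the board is on the vendor list above and the
 * matching ACPI objects (_PSS or _PPC) indicate firmware coordination.
 */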
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
			!strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					!force_load;
			}
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

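/* Any family 6 CPU advertising the HWP feature bit is handled via HWP. */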
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		intel_pstate.attr = hwp_cpufreq_attrs;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	intel_pstate_request_control_from_smm();

	rc = cpufreq_register_driver(intel_pstate_driver);
	if (rc)
		goto out;

	if (intel_pstate_driver == &intel_pstate && !hwp_active &&
	    pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
		intel_pstate_debug_expose_params();

	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

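/*
 * Parse the "intel_pstate=" early kernel parameter.  For example, booting
 * with "intel_pstate=passive" selects the intel_cpufreq driver and
 * disables HWP, while "intel_pstate=disable" keeps the driver from
 * loading at all.
 */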
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		no_load = 1;
	} else if (!strcmp(str, "passive")) {
		pr_info("Passive mode enabled\n");
		intel_pstate_driver = &intel_cpufreq;
		no_hwp = 1;
	}
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");