/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>

#define INTEL_CPUFREQ_TRANSITION_LATENCY	20000

#define ATOM_RATIOS		0x66a
#define ATOM_VIDS		0x66b
#define ATOM_TURBO_RATIOS	0x66c
#define ATOM_TURBO_VIDS		0x66d

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

#define EXT_BITS 6
#define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
#define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
#define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(s64 x, s64 y)
{
	return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
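
/*
 * Worked example of the fixed-point helpers above (illustrative, not
 * part of the original driver): with FRAC_BITS = 8, int_tofp(3) == 768
 * and mul_fp(int_tofp(3), int_tofp(2)) == (768 * 512) >> 8 == 1536 ==
 * int_tofp(6); ceiling_fp(int_tofp(2) + 1) sees the nonzero fraction
 * and rounds up to 3.
 */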

static inline u64 mul_ext_fp(u64 x, u64 y)
{
	return (x * y) >> EXT_FRAC_BITS;
}

static inline u64 div_ext_fp(u64 x, u64 y)
{
	return div64_u64(x << EXT_FRAC_BITS, y);
}
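
/*
 * Illustrative example (not from the original source): with
 * EXT_FRAC_BITS = 14, int_ext_tofp(1) == 16384 and div_ext_fp(3, 4) ==
 * (3 << 14) / 4 == 12288, i.e. 0.75 in extended fixed point.  This is
 * the representation used for core_avg_perf below.
 */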

/**
 * struct sample -	Store performance sample
 * @core_avg_perf:	Ratio of APERF/MPERF which is the actual average
 *			performance during last sample period
 * @busy_scaled:	Scaled busy value which is used to calculate next
 *			P state. This can be different from core_avg_perf
 *			to account for CPU idle periods
 * @aperf:		Difference of actual performance frequency clock count
 *			read from APERF MSR between last and current sample
 * @mperf:		Difference of maximum performance frequency clock count
 *			read from MPERF MSR between last and current sample
 * @tsc:		Difference of time stamp counter between last and
 *			current sample
 * @time:		Current time from scheduler
 *
 * This structure is used in the cpudata structure to store performance sample
 * data for choosing next P State.
 */
struct sample {
	int32_t core_avg_perf;
	int32_t busy_scaled;
	u64 aperf;
	u64 mperf;
	u64 tsc;
	u64 time;
};

/**
 * struct pstate_data - Store P state data
 * @current_pstate:	Current requested P state
 * @min_pstate:		Min P state possible for this platform
 * @max_pstate:		Max P state possible for this platform
 * @max_pstate_physical:This is physical Max P state for a processor
 *			This can be higher than the max_pstate which can
 *			be limited by platform thermal design power limits
 * @scaling:		Scaling factor to convert frequency to cpufreq
 *			frequency units
 * @turbo_pstate:	Max Turbo P state possible for this platform
 * @max_freq:		@max_pstate frequency in cpufreq units
 * @turbo_freq:		@turbo_pstate frequency in cpufreq units
 *
 * Stores the per cpu model P state limits and current P state.
 */
struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	max_pstate_physical;
	int	scaling;
	int	turbo_pstate;
	unsigned int max_freq;
	unsigned int turbo_freq;
};

/**
 * struct vid_data -	Stores voltage information data
 * @min:		VID data for this platform corresponding to
 *			the lowest P state
 * @max:		VID data corresponding to the highest P State.
 * @turbo:		VID data for turbo P state
 * @ratio:		Ratio of (vid max - vid min) /
 *			(max P state - Min P State)
 *
 * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
 * This data is used in Atom platforms, where in addition to target P state,
 * the voltage data needs to be specified to select next P State.
 */
struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

/**
 * struct _pid -	Stores PID data
 * @setpoint:		Target set point for busyness or performance
 * @integral:		Storage for accumulated error values
 * @p_gain:		PID proportional gain
 * @i_gain:		PID integral gain
 * @d_gain:		PID derivative gain
 * @deadband:		PID deadband
 * @last_err:		Last error storage for integral part of PID calculation
 *
 * Stores PID coefficients and last error for PID controller.
 */
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

/**
 * struct perf_limits - Store user and policy limits
 * @no_turbo:		User requested turbo state from intel_pstate sysfs
 * @turbo_disabled:	Platform turbo status either from msr
 *			MSR_IA32_MISC_ENABLE or when maximum available pstate
 *			matches the maximum turbo pstate
 * @max_perf_pct:	Effective maximum performance limit in percentage; this
 *			is the minimum of the limit enforced by the cpufreq
 *			policy and the user limit set via intel_pstate sysfs
 * @min_perf_pct:	Effective minimum performance limit in percentage; this
 *			is the maximum of the limit enforced by the cpufreq
 *			policy and the user limit set via intel_pstate sysfs
 * @max_perf:		This is a scaled value between 0 and 255 for max_perf_pct
 *			This value is used to limit max pstate
 * @min_perf:		This is a scaled value between 0 and 255 for min_perf_pct
 *			This value is used to limit min pstate
 * @max_policy_pct:	The maximum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @max_sysfs_pct:	The maximum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 * @min_policy_pct:	The minimum performance in percentage enforced by
 *			cpufreq setpolicy interface
 * @min_sysfs_pct:	The minimum performance in percentage enforced by
 *			intel pstate sysfs interface, unused when per cpu
 *			controls are enforced
 *
 * Storage for user and policy defined limits.
 */
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

/**
 * struct cpudata -	Per CPU instance data storage
 * @cpu:		CPU number for this instance data
 * @policy:		CPUFreq policy value
 * @update_util:	CPUFreq utility callback information
 * @update_util_set:	CPUFreq utility callback is set
 * @iowait_boost:	iowait-related boost fraction
 * @last_update:	Time of the last update.
 * @pstate:		Stores P state limits for this CPU
 * @vid:		Stores VID limits for this CPU
 * @pid:		Stores PID parameters for this CPU
 * @last_sample_time:	Last Sample time
 * @prev_aperf:		Last APERF value read from APERF MSR
 * @prev_mperf:		Last MPERF value read from MPERF MSR
 * @prev_tsc:		Last timestamp counter (TSC) value
 * @prev_cummulative_iowait: IO Wait time difference from last and
 *			current sample
 * @sample:		Storage for storing last Sample data
 * @perf_limits:	Pointer to perf_limit unique to this CPU
 *			Not all fields in the structure are applicable
 *			when per cpu controls are enforced
 * @acpi_perf_data:	Stores ACPI perf information read from _PSS
 * @valid_pss_table:	Set to true for valid ACPI _PSS entries found
 * @epp_powersave:	Last saved HWP energy performance preference
 *			(EPP) or energy performance bias (EPB),
 *			when the policy is switched to performance
 * @epp_policy:		Last saved policy used to set EPP/EPB
 * @epp_default:	Power on default HWP energy performance
 *			preference/bias
 * @epp_saved:		Saved EPP/EPB during system suspend or CPU offline
 *			operation
 *
 * This structure stores per CPU instance data for all CPUs.
 */
struct cpudata {
	int cpu;

	unsigned int policy;
	struct update_util_data update_util;
	bool   update_util_set;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	last_update;
	u64	last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	u64	prev_tsc;
	u64	prev_cummulative_iowait;
	struct sample sample;
	struct perf_limits *perf_limits;
#ifdef CONFIG_ACPI
	struct acpi_processor_performance acpi_perf_data;
	bool valid_pss_table;
#endif
	unsigned int iowait_boost;
	s16 epp_powersave;
	s16 epp_policy;
	s16 epp_default;
	s16 epp_saved;
};

static struct cpudata **all_cpu_data;

/**
 * struct pstate_adjust_policy - Stores static PID configuration data
 * @sample_rate_ms:	PID calculation sample rate in ms
 * @sample_rate_ns:	Sample rate calculation in ns
 * @deadband:		PID deadband
 * @setpoint:		PID Setpoint
 * @p_gain_pct:		PID proportional gain
 * @i_gain_pct:		PID integral gain
 * @d_gain_pct:		PID derivative gain
 *
 * Stores per CPU model static PID configuration data.
 */
struct pstate_adjust_policy {
	int sample_rate_ms;
	s64 sample_rate_ns;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

/**
 * struct pstate_funcs - Per CPU model specific callbacks
 * @get_max:		Callback to get maximum non turbo effective P state
 * @get_max_physical:	Callback to get maximum non turbo physical P state
 * @get_min:		Callback to get minimum P state
 * @get_turbo:		Callback to get turbo P state
 * @get_scaling:	Callback to get frequency scaling factor
 * @get_val:		Callback to convert P state to actual MSR write value
 * @get_vid:		Callback to get VID data for Atom platforms
 * @get_target_pstate:	Callback to a function to calculate next P state to use
 *
 * Core and Atom CPU models have different ways to get P State limits. This
 * structure is used to store those callbacks.
 */
331 332
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_max_physical)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	u64 (*get_val)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
	int32_t (*get_target_pstate)(struct cpudata *);
};

/**
 * struct cpu_defaults -	Per CPU model default config data
 * @pid_policy:	PID config data
 * @funcs:		Callback function data
 */
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu);
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu);

static struct pstate_adjust_policy pid_params __read_mostly;
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif

static struct perf_limits performance_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 100,
	.min_perf = int_ext_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

static struct perf_limits powersave_limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_ext_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
static struct perf_limits *limits = &performance_limits;
#else
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI

static bool intel_pstate_get_ppc_enable_status(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
	    acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
		return true;

	return acpi_ppc;
}

static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int ret;
	int i;

	if (hwp_active)
		return;

	if (!intel_pstate_get_ppc_enable_status())
		return;

	cpu = all_cpu_data[policy->cpu];

	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
						  policy->cpu);
	if (ret)
		return;

	/*
	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
	 * guarantee that the states returned by it map to the states in our
	 * list directly.
	 */
	if (cpu->acpi_perf_data.control_register.space_id !=
						ACPI_ADR_SPACE_FIXED_HARDWARE)
		goto err;

	/*
	 * If there is only one entry in _PSS, simply ignore _PSS and continue
	 * as usual without taking _PSS into account
	 */
	if (cpu->acpi_perf_data.state_count < 2)
		goto err;

	pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
			 (u32) cpu->acpi_perf_data.states[i].power,
			 (u32) cpu->acpi_perf_data.states[i].control);
	}

	/*
	 * The _PSS table doesn't contain the whole turbo frequency range.
	 * It just contains +1 MHz above the max non turbo frequency,
	 * with a control value corresponding to the max turbo ratio. But
	 * when cpufreq set_policy is called, it will call with this
	 * max frequency, which will cause reduced performance, as
	 * this driver uses the real max turbo frequency as the max
	 * frequency. So correct this frequency in the _PSS table to the
	 * correct max turbo frequency based on the turbo state.
	 * Also need to convert to MHz as _PSS freq is in MHz.
	 */
	if (!limits->turbo_disabled)
		cpu->acpi_perf_data.states[0].core_frequency =
					policy->cpuinfo.max_freq / 1000;
	cpu->valid_pss_table = true;
	pr_debug("_PPC limits will be enforced\n");

	return;

 err:
	cpu->valid_pss_table = false;
	acpi_processor_unregister_performance(policy->cpu);
}

static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];
	if (!cpu->valid_pss_table)
		return;

	acpi_processor_unregister_performance(policy->cpu);
}

#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
}

static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
{
}
#endif

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral) {
	pid->setpoint = int_tofp(setpoint);
	pid->deadband  = int_tofp(deadband);
	pid->integral  = int_tofp(integral);
	pid->last_err  = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(percent, 100);
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(percent, 100);
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(percent, 100);
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = pid->setpoint - busy;

	if (abs(fp_error) <= pid->deadband)
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
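
/*
 * Worked example (illustrative only): with the core defaults used later
 * in this file (setpoint = 97, deadband = 0, p_gain_pct = 20, zero
 * i/d gains), a busy value of int_tofp(92) gives fp_error = int_tofp(5)
 * and a pterm of roughly int_tofp(1), so pid_calc() returns 1 and the
 * caller lowers the requested P state by one step.
 */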

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits->turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}

static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return (s16)ret;

	return (s16)(epb & 0x0f);
}

static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
	s16 epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * When hwp_req_data is 0, means that caller didn't read
		 * MSR_HWP_REQUEST, so need to read and get EPP.
		 */
		if (!hwp_req_data) {
			epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
					    &hwp_req_data);
			if (epp)
				return epp;
		}
		epp = (hwp_req_data >> 24) & 0xff;
	} else {
		/* When there is no EPP present, HWP uses EPB settings */
		epp = intel_pstate_get_epb(cpu_data);
	}

	return epp;
}

static int intel_pstate_set_epb(int cpu, s16 pref)
{
	u64 epb;
	int ret;

	if (!static_cpu_has(X86_FEATURE_EPB))
		return -ENXIO;

	ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
	if (ret)
		return ret;

	epb = (epb & ~0x0f) | pref;
	wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);

	return 0;
}

/*
 * EPP/EPB display strings corresponding to EPP index in the
 * energy_perf_strings[]
 *	index		String
 *-------------------------------------
 *	0		default
 *	1		performance
 *	2		balance_performance
 *	3		balance_power
 *	4		power
 */
static const char * const energy_perf_strings[] = {
	"default",
	"performance",
	"balance_performance",
	"balance_power",
	"power",
	NULL
};

static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data)
{
	s16 epp;
	int index = -EINVAL;

	epp = intel_pstate_get_epp(cpu_data, 0);
	if (epp < 0)
		return epp;

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		/*
		 * Range:
		 *	0x00-0x3F	:	Performance
		 *	0x40-0x7F	:	Balance performance
		 *	0x80-0xBF	:	Balance power
		 *	0xC0-0xFF	:	Power
		 * The EPP is an 8 bit value, but our ranges restrict the
		 * value which can be set. Here we effectively use only the
		 * top two bits.
		 */
		index = (epp >> 6) + 1;
	} else if (static_cpu_has(X86_FEATURE_EPB)) {
		/*
		 * Range:
		 *	0x00-0x03	:	Performance
		 *	0x04-0x07	:	Balance performance
		 *	0x08-0x0B	:	Balance power
		 *	0x0C-0x0F	:	Power
		 * The EPB is a 4 bit value, but our ranges restrict the
		 * value which can be set. Here we effectively use only the
		 * top two bits.
		 */
		index = (epp >> 2) + 1;
	}

	return index;
}
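
/*
 * Example of the mapping above (illustrative): a raw HWP EPP value of
 * 0x80 has 0b10 in its top two bits, so (0x80 >> 6) + 1 == 3, which
 * selects "balance_power" in energy_perf_strings[].
 */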

static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
					      int pref_index)
{
	int epp = -EINVAL;
	int ret;

	if (!pref_index)
		epp = cpu_data->epp_default;

	mutex_lock(&intel_pstate_limits_lock);

	if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
		u64 value;

		ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
		if (ret)
			goto return_pref;

		value &= ~GENMASK_ULL(31, 24);

		/*
		 * If epp is not default, convert from an index into
		 * energy_perf_strings to an epp value, by shifting 6
		 * bits left to use only the top two bits in epp.
		 * The resulting epp needs to be shifted by 24 bits to
		 * the epp position in MSR_HWP_REQUEST.
		 */
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 6;

		value |= (u64)epp << 24;
		ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
	} else {
		if (epp == -EINVAL)
			epp = (pref_index - 1) << 2;
		ret = intel_pstate_set_epb(cpu_data->cpu, epp);
	}
return_pref:
	mutex_unlock(&intel_pstate_limits_lock);

	return ret;
}

static ssize_t show_energy_performance_available_preferences(
				struct cpufreq_policy *policy, char *buf)
{
	int i = 0;
	int ret = 0;

	while (energy_perf_strings[i] != NULL)
		ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);

	ret += sprintf(&buf[ret], "\n");

	return ret;
}

cpufreq_freq_attr_ro(energy_performance_available_preferences);

static ssize_t store_energy_performance_preference(
		struct cpufreq_policy *policy, const char *buf, size_t count)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	char str_preference[21];
	int ret, i = 0;

	ret = sscanf(buf, "%20s", str_preference);
	if (ret != 1)
		return -EINVAL;

	while (energy_perf_strings[i] != NULL) {
		if (!strcmp(str_preference, energy_perf_strings[i])) {
			intel_pstate_set_energy_pref_index(cpu_data, i);
			return count;
		}
		++i;
	}

	return -EINVAL;
}

static ssize_t show_energy_performance_preference(
				struct cpufreq_policy *policy, char *buf)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];
	int preference;

	preference = intel_pstate_get_energy_pref_index(cpu_data);
	if (preference < 0)
		return preference;

	return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
}

cpufreq_freq_attr_rw(energy_performance_preference);

static struct freq_attr *hwp_cpufreq_attrs[] = {
	&energy_performance_preference,
	&energy_performance_available_preferences,
	NULL,
};

static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
	int min, hw_min, max, hw_max, cpu, range, adj_range;
	struct perf_limits *perf_limits = limits;
	u64 value, cap;

	for_each_cpu(cpu, cpumask) {
		int max_perf_pct, min_perf_pct;
		struct cpudata *cpu_data = all_cpu_data[cpu];
		s16 epp;

		if (per_cpu_limits)
			perf_limits = all_cpu_data[cpu]->perf_limits;

		rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
		hw_min = HWP_LOWEST_PERF(cap);
		hw_max = HWP_HIGHEST_PERF(cap);
		range = hw_max - hw_min;

		max_perf_pct = perf_limits->max_perf_pct;
		min_perf_pct = perf_limits->min_perf_pct;

		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		adj_range = min_perf_pct * range / 100;
		min = hw_min + adj_range;
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		adj_range = max_perf_pct * range / 100;
		max = hw_min + adj_range;
		if (limits->no_turbo) {
			hw_max = HWP_GUARANTEED_PERF(cap);
			if (hw_max < max)
				max = hw_max;
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);

		if (cpu_data->epp_policy == cpu_data->policy)
			goto skip_epp;

		cpu_data->epp_policy = cpu_data->policy;

		if (cpu_data->epp_saved >= 0) {
			epp = cpu_data->epp_saved;
			cpu_data->epp_saved = -EINVAL;
			goto update_epp;
		}

		if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
			epp = intel_pstate_get_epp(cpu_data, value);
			cpu_data->epp_powersave = epp;
			/* If the EPP read failed, then don't try to write */
			if (epp < 0)
				goto skip_epp;

			epp = 0;
		} else {
			/* Skip setting EPP when the saved value is invalid */
			if (cpu_data->epp_powersave < 0)
				goto skip_epp;

			/*
			 * No need to restore EPP when it is not zero. This
			 * means:
			 *  - Policy is not changed
			 *  - user has manually changed it
			 *  - Error reading EPB
			 */
			epp = intel_pstate_get_epp(cpu_data, value);
			if (epp)
				goto skip_epp;

			epp = cpu_data->epp_powersave;
		}
update_epp:
		if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
			value &= ~GENMASK_ULL(31, 24);
			value |= (u64)epp << 24;
		} else {
			intel_pstate_set_epb(cpu, epp);
		}
skip_epp:
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}
}

static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
{
	if (hwp_active)
		intel_pstate_hwp_set(policy->cpus);

	return 0;
}

static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
{
	struct cpudata *cpu_data = all_cpu_data[policy->cpu];

	if (!hwp_active)
		return 0;

	cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);

	return 0;
}

static int intel_pstate_resume(struct cpufreq_policy *policy)
{
	if (!hwp_active)
		return 0;

	all_cpu_data[policy->cpu]->epp_policy = 0;

	return intel_pstate_hwp_set_policy(policy);
}

static void intel_pstate_hwp_set_online_cpus(void)
{
	get_online_cpus();
	intel_pstate_hwp_set(cpu_online_mask);
	put_online_cpus();
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active ||
	    pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load)
		return;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits->object);		\
	}

static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(no_turbo, total);
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}
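
/*
 * Worked example for show_turbo_pct() (illustrative): with min_pstate =
 * 8, max_pstate = 27 and turbo_pstate = 35, total == 28 and no_turbo ==
 * 20, so roughly 29% of the available P states are turbo states.
 */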

static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}

static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits->turbo_disabled)
		ret = sprintf(buf, "%u\n", limits->turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits->no_turbo);

	return ret;
}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	update_turbo_state();
	if (limits->turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		mutex_unlock(&intel_pstate_limits_lock);
		return -EPERM;
	}

	limits->no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	mutex_unlock(&intel_pstate_limits_lock);

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
	limits->max_perf_pct = max(limits->min_perf_pct,
				   limits->max_perf_pct);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	mutex_unlock(&intel_pstate_limits_lock);

	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&intel_pstate_limits_lock);

	limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->min_perf_pct = min(limits->max_perf_pct,
				   limits->min_perf_pct);
	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);

	if (hwp_active)
		intel_pstate_hwp_set_online_cpus();

	mutex_unlock(&intel_pstate_limits_lock);

	return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	if (WARN_ON(!intel_pstate_kobject))
		return;

	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	if (WARN_ON(rc))
		return;

	/*
	 * If per cpu limits are enforced, there are no global limits, so
	 * return without creating max/min_perf_pct attributes
	 */
	if (per_cpu_limits)
		return;

	rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
	WARN_ON(rc);

	rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
	WARN_ON(rc);

}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
	/* First disable HWP notification interrupts as we don't process them */
	if (static_cpu_has(X86_FEATURE_HWP_NOTIFY))
		wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);

	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
	cpudata->epp_policy = 0;
	if (cpudata->epp_default == -EINVAL)
		cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}

static int atom_get_min_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(ATOM_TURBO_RATIOS, value);
	return value & 0x7F;
}

static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	return val | vid;
}

static int silvermont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-6 from SDM (Sept 2015) */
	static int silvermont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;
	WARN_ON(i > 4);

	return silvermont_freq_table[i];
}

static int airmont_get_scaling(void)
{
	u64 value;
	int i;
	/* Defined in Table 35-10 from SDM (Sept 2015) */
	static int airmont_freq_table[] = {
		83300, 100000, 133300, 116700, 80000,
		93300, 90000, 88900, 87500};

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0xF;
	WARN_ON(i > 8);

	return airmont_freq_table[i];
}
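
/*
 * Example (illustrative): if the low bits of MSR_FSB_FREQ select table
 * entry 2, the bus clock is 133.3 MHz, so a P state ratio of 20
 * corresponds to 20 * 133300 kHz = 2.666 GHz.
 */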

static void atom_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(ATOM_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(ATOM_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 tar;
	u64 plat_info;
	int max_pstate;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		/* Do some sanity checking for safety */
		if (plat_info & 0x600000000) {
			u64 tdp_ctrl;
			u64 tdp_ratio;
			int tdp_msr;

			err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
			if (err)
				goto skip_tar;

			tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
			err = rdmsrl_safe(tdp_msr, &tdp_ratio);
			if (err)
				goto skip_tar;

			/* For level 1 and 2, bits[23:16] contain the ratio */
			if (tdp_ctrl)
				tdp_ratio >>= 16;

			tdp_ratio &= 0xff; /* ratios are only 8 bits long */
			if (tdp_ratio - 1 == tar) {
				max_pstate = tar;
				pr_debug("max_pstate=TAC %x\n", max_pstate);
			} else {
				goto skip_tar;
			}
		}
	}

skip_tar:
	return max_pstate;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static inline int core_get_scaling(void)
{
	return 100000;
}

static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits->no_turbo && !limits->turbo_disabled)
		val |= (u64)1 << 32;

	return val;
}

static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults silvermont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = silvermont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults airmont_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = atom_get_max_pstate,
		.get_max_physical = atom_get_max_pstate,
		.get_min = atom_get_min_pstate,
		.get_turbo = atom_get_turbo_pstate,
		.get_val = atom_get_val,
		.get_scaling = airmont_get_scaling,
		.get_vid = atom_get_vid,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static const struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_performance,
	},
};

static const struct cpu_defaults bxt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_max_physical = core_get_max_pstate_physical,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.get_val = core_get_val,
		.get_target_pstate = get_target_pstate_use_cpu_load,
	},
};

static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	struct perf_limits *perf_limits = limits;
1487

1488
	if (limits->no_turbo || limits->turbo_disabled)
1489 1490
		max_perf = cpu->pstate.max_pstate;

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_ext_toint(max_perf * perf_limits->max_perf);
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_ext_toint(max_perf * perf_limits->min_perf);
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
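
/*
 * Worked example (illustrative): with turbo_pstate = 35 and
 * max_perf_pct = 50 (max_perf == 0.5 in extended fixed point),
 * max_perf_adj == fp_ext_toint(35 * 0.5) == 17, which is then clamped
 * into the [min_pstate, turbo_pstate] range.
 */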

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	cpu->pstate.current_pstate = pstate;
	/*
	 * Generally, there is no guarantee that this code will always run on
	 * the CPU being updated, so force the register update to run on the
	 * right CPU.
	 */
	wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
		      pstate_funcs.get_val(cpu, pstate));
}

static void intel_pstate_set_min_pstate(struct cpudata *cpu)
{
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static void intel_pstate_max_within_limits(struct cpudata *cpu)
{
	int min_pstate, max_pstate;

	update_turbo_state();
	intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
	intel_pstate_set_pstate(cpu, max_pstate);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();
	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);

	intel_pstate_set_min_pstate(cpu);
}

static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;

	sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
}
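
/*
 * Example (illustrative): if APERF advanced by 2,000,000 counts and
 * MPERF by 4,000,000 counts between samples, core_avg_perf becomes
 * div_ext_fp(2000000, 4000000) == 0.5, i.e. the core ran on average at
 * half of its maximum non-turbo frequency while not idle.
 */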

static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
{
	u64 aperf, mperf;
	unsigned long flags;
	u64 tsc;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	tsc = rdtsc();
	if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
		local_irq_restore(flags);
		return false;
	}
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = time;
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.tsc =  tsc;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;
	cpu->sample.tsc -= cpu->prev_tsc;

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
	cpu->prev_tsc = tsc;
	/*
	 * First time this function is invoked in a given cycle, all of the
	 * previous sample data fields are equal to zero or stale and they must
	 * be populated with meaningful numbers for things to work, so assume
	 * that sample.time will always be reset before setting the utilization
	 * update hook and make the caller skip the sample then.
	 */
	return !!cpu->last_sample_time;
}

static inline int32_t get_avg_frequency(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->sample.core_avg_perf,
			  cpu->pstate.max_pstate_physical * cpu->pstate.scaling);
}

static inline int32_t get_avg_pstate(struct cpudata *cpu)
{
	return mul_ext_fp(cpu->pstate.max_pstate_physical,
			  cpu->sample.core_avg_perf);
}

static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int32_t busy_frac, boost;
	int target, avg_pstate;

	busy_frac = div_fp(sample->mperf, sample->tsc);

	boost = cpu->iowait_boost;
	cpu->iowait_boost >>= 1;

	if (busy_frac < boost)
		busy_frac = boost;

	sample->busy_scaled = busy_frac * 100;

	target = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	target += target >> 2;
	target = mul_fp(target, busy_frac);
	if (target < cpu->pstate.min_pstate)
		target = cpu->pstate.min_pstate;

	/*
	 * If the average P-state during the previous cycle was higher than the
	 * current target, add 50% of the difference to the target to reduce
	 * possible performance oscillations and offset possible performance
	 * loss related to moving the workload from one CPU to another within
	 * a package/module.
	 */
	avg_pstate = get_avg_pstate(cpu);
	if (avg_pstate > target)
		target += (avg_pstate - target) >> 1;

	return target;
}
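
/*
 * Worked example (illustrative): with turbo available, turbo_pstate =
 * 32 and busy_frac = 0.5, target = (32 + 32 / 4) * 0.5 = 20; if the
 * average P-state over the last cycle was 24, the returned target is
 * raised to 20 + (24 - 20) / 2 = 22.
 */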

static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
{
	int32_t perf_scaled, max_pstate, current_pstate, sample_ratio;
	u64 duration_ns;

	/*
	 * perf_scaled is the ratio of the average P-state during the last
	 * sampling period to the P-state requested last time (in percent).
	 *
	 * That measures the system's response to the previous P-state
	 * selection.
	 */
	max_pstate = cpu->pstate.max_pstate_physical;
	current_pstate = cpu->pstate.current_pstate;
	perf_scaled = mul_ext_fp(cpu->sample.core_avg_perf,
			       div_fp(100 * max_pstate, current_pstate));

	/*
	 * Since our utilization update callback will not run unless we are
	 * in C0, check if the actual elapsed time is significantly greater (3x)
	 * than our sample interval.  If it is, then we were idle for a long
	 * enough period of time to adjust our performance metric.
	 */
	duration_ns = cpu->sample.time - cpu->last_sample_time;
	if ((s64)duration_ns > pid_params.sample_rate_ns * 3) {
		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
		perf_scaled = mul_fp(perf_scaled, sample_ratio);
	} else {
		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
		if (sample_ratio < int_tofp(1))
			perf_scaled = 0;
	}

	cpu->sample.busy_scaled = perf_scaled;
	return cpu->pstate.current_pstate - pid_calc(&cpu->pid, perf_scaled);
}

static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
	pstate = clamp_t(int, pstate, min_perf, max_perf);
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
	return pstate;
}

static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
	pstate = intel_pstate_prepare_request(cpu, pstate);
	if (pstate == cpu->pstate.current_pstate)
		return;

	cpu->pstate.current_pstate = pstate;
	wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int from, target_pstate;
	struct sample *sample;

	from = cpu->pstate.current_pstate;

	target_pstate = cpu->policy == CPUFREQ_POLICY_PERFORMANCE ?
		cpu->pstate.turbo_pstate : pstate_funcs.get_target_pstate(cpu);

	update_turbo_state();

	intel_pstate_update_pstate(cpu, target_pstate);

	sample = &cpu->sample;
	trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
		fp_toint(sample->busy_scaled),
		from,
		cpu->pstate.current_pstate,
		sample->mperf,
		sample->aperf,
		sample->tsc,
		get_avg_frequency(cpu),
		fp_toint(cpu->iowait_boost * 100));
}

static void intel_pstate_update_util(struct update_util_data *data, u64 time,
				     unsigned int flags)
{
	struct cpudata *cpu = container_of(data, struct cpudata, update_util);
	u64 delta_ns;

	if (pstate_funcs.get_target_pstate == get_target_pstate_use_cpu_load) {
		if (flags & SCHED_CPUFREQ_IOWAIT) {
			cpu->iowait_boost = int_tofp(1);
		} else if (cpu->iowait_boost) {
			/* Clear iowait_boost if the CPU may have been idle. */
			delta_ns = time - cpu->last_update;
			if (delta_ns > TICK_NSEC)
				cpu->iowait_boost = 0;
		}
		cpu->last_update = time;
	}

	delta_ns = time - cpu->sample.time;
	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
		bool sample_taken = intel_pstate_sample(cpu, time);

		if (sample_taken) {
			intel_pstate_calc_avg_perf(cpu);
			if (!hwp_active)
				intel_pstate_adjust_busy_pstate(cpu);
		}
	}
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(INTEL_FAM6_SANDYBRIDGE, 		core_params),
	ICPU(INTEL_FAM6_SANDYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1,	silvermont_params),
	ICPU(INTEL_FAM6_IVYBRIDGE,		core_params),
	ICPU(INTEL_FAM6_HASWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_CORE,		core_params),
	ICPU(INTEL_FAM6_IVYBRIDGE_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_X,		core_params),
	ICPU(INTEL_FAM6_HASWELL_ULT,		core_params),
	ICPU(INTEL_FAM6_HASWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_GT3E,		core_params),
	ICPU(INTEL_FAM6_ATOM_AIRMONT,		airmont_params),
	ICPU(INTEL_FAM6_SKYLAKE_MOBILE,		core_params),
	ICPU(INTEL_FAM6_BROADWELL_X,		core_params),
	ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,	core_params),
	ICPU(INTEL_FAM6_BROADWELL_XEON_D,	core_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNL,		knl_params),
	ICPU(INTEL_FAM6_XEON_PHI_KNM,		knl_params),
	ICPU(INTEL_FAM6_ATOM_GOLDMONT,		bxt_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
	ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
	ICPU(INTEL_FAM6_BROADWELL_X, core_params),
	ICPU(INTEL_FAM6_SKYLAKE_X, core_params),
	{}
};

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[cpunum];

	if (!cpu) {
		unsigned int size = sizeof(struct cpudata);

		if (per_cpu_limits)
			size += sizeof(struct perf_limits);

		cpu = kzalloc(size, GFP_KERNEL);
		if (!cpu)
			return -ENOMEM;

		all_cpu_data[cpunum] = cpu;
		if (per_cpu_limits)
			cpu->perf_limits = (struct perf_limits *)(cpu + 1);

		cpu->epp_default = -EINVAL;
		cpu->epp_powersave = -EINVAL;
		cpu->epp_saved = -EINVAL;
	}

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;

	if (hwp_active) {
		intel_pstate_hwp_enable(cpu);
		pid_params.sample_rate_ms = 50;
		pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
	}

	intel_pstate_get_cpu_pstates(cpu);

	intel_pstate_busy_pid_reset(cpu);

	pr_debug("controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	return cpu ? get_avg_frequency(cpu) : 0;
}

static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (cpu->update_util_set)
		return;

	/* Prevent intel_pstate_update_util() from using stale data. */
	cpu->sample.time = 0;
	cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
				     intel_pstate_update_util);
	cpu->update_util_set = true;
}

static void intel_pstate_clear_update_util_hook(unsigned int cpu)
{
	struct cpudata *cpu_data = all_cpu_data[cpu];

	if (!cpu_data->update_util_set)
		return;

	cpufreq_remove_update_util_hook(cpu);
	cpu_data->update_util_set = false;
	synchronize_sched();
}

static void intel_pstate_set_performance_limits(struct perf_limits *limits)
{
	limits->no_turbo = 0;
	limits->turbo_disabled = 0;
	limits->max_perf_pct = 100;
	limits->max_perf = int_ext_tofp(1);
1875
	limits->min_perf_pct = 100;
1876
	limits->min_perf = int_ext_tofp(1);
1877 1878 1879 1880 1881 1882
	limits->max_policy_pct = 100;
	limits->max_sysfs_pct = 100;
	limits->min_policy_pct = 0;
	limits->min_sysfs_pct = 0;
}

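/*
 * Translate the policy's min/max frequencies into percentages of the
 * maximum supported frequency, combine them with the sysfs limits, and
 * convert the result into extended fixed-point fractions for the
 * P-state selection code.
 */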
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
					    struct perf_limits *limits)
{
	limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
					      policy->cpuinfo.max_freq);
	limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
	if (policy->max == policy->min) {
		limits->min_policy_pct = limits->max_policy_pct;
	} else {
		limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
						      policy->cpuinfo.max_freq);
		limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
						 0, 100);
	}

	/* Normalize user input to [min_policy_pct, max_policy_pct] */
	limits->min_perf_pct = max(limits->min_policy_pct,
				   limits->min_sysfs_pct);
	limits->min_perf_pct = min(limits->max_policy_pct,
				   limits->min_perf_pct);
	limits->max_perf_pct = min(limits->max_policy_pct,
				   limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);

	limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
	limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
	limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
	limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);

	pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
		 limits->max_perf_pct, limits->min_perf_pct);
}

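/*
 * cpufreq ->setpolicy() callback: refresh the global (or per-CPU)
 * limits from the new policy and arm the scheduler callback.  For a
 * PERFORMANCE policy covering the full frequency range, the limits are
 * opened up and the CPU is pushed to the highest allowed P-state
 * immediately.
 */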
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	struct perf_limits *perf_limits = NULL;

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
		 policy->cpuinfo.max_freq, policy->max);

	cpu = all_cpu_data[policy->cpu];
	cpu->policy = policy->policy;

	if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
	    policy->max < policy->cpuinfo.max_freq &&
	    policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
		pr_debug("policy->max > max non turbo frequency\n");
		policy->max = policy->cpuinfo.max_freq;
	}

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	mutex_lock(&intel_pstate_limits_lock);

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		if (!perf_limits) {
			limits = &performance_limits;
			perf_limits = limits;
		}
		if (policy->max >= policy->cpuinfo.max_freq) {
			pr_debug("set performance\n");
			intel_pstate_set_performance_limits(perf_limits);
			goto out;
		}
	} else {
		pr_debug("set powersave\n");
		if (!perf_limits) {
			limits = &powersave_limits;
			perf_limits = limits;
		}
	}

	intel_pstate_update_perf_limits(policy, perf_limits);
 out:
	if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
		/*
		 * NOHZ_FULL CPUs need this as the governor callback may not
		 * be invoked on them.
		 */
		intel_pstate_clear_update_util_hook(policy->cpu);
		intel_pstate_max_within_limits(cpu);
	}

	intel_pstate_set_update_util_hook(policy->cpu);

	intel_pstate_hwp_set_policy(policy);

	mutex_unlock(&intel_pstate_limits_lock);

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}

static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
}

static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	pr_debug("CPU %d exiting\n", policy->cpu);

	intel_pstate_clear_update_util_hook(policy->cpu);
	if (hwp_active)
		intel_pstate_hwp_save_state(policy);
	else
		intel_cpufreq_stop_cpu(policy);
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	intel_pstate_exit_perf_limits(policy);

	policy->fast_switch_possible = false;

	return 0;
}

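/*
 * Initialization shared by the active (intel_pstate) and passive
 * (intel_cpufreq) drivers: set up the per-CPU data, seed any per-CPU
 * limits from the global ones and derive cpuinfo from the hardware
 * P-state range.
 */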
static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	/*
	 * We need sane values in cpu->perf_limits, so inherit from the
	 * global perf_limits, which are seeded with values based on
	 * CONFIG_CPU_FREQ_DEFAULT_GOV_* during boot up.
	 */
	if (per_cpu_limits)
		memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
	policy->cpuinfo.max_freq *= cpu->pstate.scaling;

	intel_pstate_init_acpi_perf_limits(policy);
	cpumask_set_cpu(policy->cpu, policy->cpus);

	policy->fast_switch_possible = true;

	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	return 0;
}

static struct cpufreq_driver intel_pstate = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.suspend	= intel_pstate_hwp_save_state,
	.resume		= intel_pstate_resume,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

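/*
 * Everything below implements the passive mode of operation, in which
 * intel_pstate is registered as a regular cpufreq driver
 * (intel_cpufreq) and frequency selection is left to the generic
 * cpufreq governors.
 */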
static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct perf_limits *perf_limits = limits;

	update_turbo_state();
	policy->cpuinfo.max_freq = limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;

	cpufreq_verify_within_cpu_limits(policy);

	if (per_cpu_limits)
		perf_limits = cpu->perf_limits;

	intel_pstate_update_perf_limits(policy, perf_limits);

	return 0;
}

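/*
 * Re-evaluate turbo availability and clamp both the policy maximum and
 * the requested target frequency to what is currently reachable.
 */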
static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
					       struct cpufreq_policy *policy,
					       unsigned int target_freq)
{
	unsigned int max_freq;

	update_turbo_state();

	max_freq = limits->no_turbo || limits->turbo_disabled ?
			cpu->pstate.max_freq : cpu->pstate.turbo_freq;
	policy->cpuinfo.max_freq = max_freq;
	if (policy->max > max_freq)
		policy->max = max_freq;

	if (target_freq > max_freq)
		target_freq = max_freq;

	return target_freq;
}

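/*
 * cpufreq ->target() callback: map the target frequency to a P-state
 * according to @relation (rounding up for RELATION_L, down for
 * RELATION_H, to closest otherwise) and write it to MSR_IA32_PERF_CTL,
 * wrapped in the usual cpufreq transition notifications.
 */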
static int intel_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	struct cpufreq_freqs freqs;
	int target_pstate;

	freqs.old = policy->cur;
	freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);

	cpufreq_freq_transition_begin(policy, &freqs);
	switch (relation) {
	case CPUFREQ_RELATION_L:
		target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
		break;
	case CPUFREQ_RELATION_H:
		target_pstate = freqs.new / cpu->pstate.scaling;
		break;
	default:
		target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
		break;
	}
	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
	if (target_pstate != cpu->pstate.current_pstate) {
		cpu->pstate.current_pstate = target_pstate;
		wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
			      pstate_funcs.get_val(cpu, target_pstate));
	}
	cpufreq_freq_transition_end(policy, &freqs, false);

	return 0;
}

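/*
 * Fast switch path: round the target frequency up to a P-state and
 * request it directly, without transition notifications.
 */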
static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
					      unsigned int target_freq)
{
	struct cpudata *cpu = all_cpu_data[policy->cpu];
	int target_pstate;

	target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
	target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
	intel_pstate_update_pstate(cpu, target_pstate);
	return target_freq;
}

static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int ret = __intel_pstate_cpu_init(policy);

	if (ret)
		return ret;

	policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
	/* This reflects the intel_pstate_get_cpu_pstates() setting. */
	policy->cur = policy->cpuinfo.min_freq;

	return 0;
}

static struct cpufreq_driver intel_cpufreq = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_cpufreq_verify_policy,
	.target		= intel_cpufreq_target,
	.fast_switch	= intel_cpufreq_fast_switch,
	.init		= intel_cpufreq_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.stop_cpu	= intel_cpufreq_stop_cpu,
	.name		= "intel_cpufreq",
};

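/* Switched to &intel_cpufreq when "intel_pstate=passive" is given. */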
static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
static unsigned int force_load __initdata;

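/*
 * Sanity check: if any of the limit-reading callbacks returns 0, the
 * P-state MSRs are not usable on this CPU and the driver must not load.
 */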
static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}

static void __init copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

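/*
 * On platforms whose ACPI FADT declares the mobile preferred PM
 * profile, target P-states are derived from CPU load rather than from
 * the PID controller.
 */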
#ifdef CONFIG_ACPI
static void intel_pstate_use_acpi_profile(void)
{
	if (acpi_gbl_FADT.preferred_profile == PM_MOBILE)
		pstate_funcs.get_target_pstate =
				get_target_pstate_use_cpu_load;
}
#else
static void intel_pstate_use_acpi_profile(void)
{
}
#endif

static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_max_physical = funcs->get_max_physical;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.get_val   = funcs->get_val;
	pstate_funcs.get_vid   = funcs->get_vid;
	pstate_funcs.get_target_pstate = funcs->get_target_pstate;

	intel_pstate_use_acpi_profile();
}

#ifdef CONFIG_ACPI

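/* True when no CPU's ACPI processor object has a valid _PSS package. */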
static bool __init intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

static bool __init intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}

enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] __initdata = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{1, "ORACLE", "X4170 M3", PPC},
	{1, "ORACLE", "X4275 M3", PPC},
	{1, "ORACLE", "X6-2    ", PPC},
	{1, "ORACLE", "Sudbury ", PPC},
	{0, "", ""},
};

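/*
 * True when the platform firmware is managing P-states itself: either
 * the OOB bit (bit 8) of MSR_MISC_PWR_MGMT is set, or the FADT OEM ids
 * match a vendor table entry and the corresponding ACPI check (no
 * usable _PSS, or _PPC present without "force" on the command line)
 * agrees.
 */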
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
			!strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}

static void intel_pstate_request_control_from_smm(void)
{
	/*
	 * It may be unsafe to request P-states control from SMM if _PPC support
	 * has not been enabled.
	 */
	if (acpi_ppc)
		acpi_processor_pstate_control();
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */

static const struct x86_cpu_id hwp_support_ids[] __initconst = {
	{ X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
	{}
};

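/*
 * Driver entry point: bail out if this CPU is unsupported or the
 * platform firmware owns P-state control; otherwise copy in the
 * CPU-specific callbacks and register whichever cpufreq driver (active
 * or passive) was selected on the command line.
 */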
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
		copy_cpu_funcs(&core_params.funcs);
		hwp_active++;
		intel_pstate.attr = hwp_cpufreq_attrs;
		goto hwp_cpu_matched;
	}

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

hwp_cpu_matched:
	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	pr_info("Intel P-state driver initializing\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (!hwp_active && hwp_only)
		goto out;

	intel_pstate_request_control_from_smm();

	rc = cpufreq_register_driver(intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	if (hwp_active)
		pr_info("HWP enabled\n");

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			if (intel_pstate_driver == &intel_pstate)
				intel_pstate_clear_update_util_hook(cpu);

			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

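/*
 * Early parsing of "intel_pstate=" on the kernel command line.  The
 * recognized options, matching the comparisons below: disable, passive
 * (which implies no_hwp), no_hwp, force, hwp_only, per_cpu_perf_limits
 * and, with CONFIG_ACPI, support_acpi_ppc.
 */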
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable")) {
		no_load = 1;
	} else if (!strcmp(str, "passive")) {
		pr_info("Passive mode enabled\n");
		intel_pstate_driver = &intel_cpufreq;
		no_hwp = 1;
	}
	if (!strcmp(str, "no_hwp")) {
		pr_info("HWP disabled\n");
		no_hwp = 1;
	}
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	if (!strcmp(str, "per_cpu_perf_limits"))
		per_cpu_limits = true;

#ifdef CONFIG_ACPI
	if (!strcmp(str, "support_acpi_ppc"))
		acpi_ppc = true;
#endif

	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");