/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#define SAMPLE_COUNT		3

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d


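/*
 * Fixed point arithmetic: values carry FRAC_BITS fractional bits.
 * int_tofp()/fp_toint() convert to and from this format, and mul_fp()
 * and div_fp() below operate on it.
 */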
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)


static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}

struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

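/*
 * PID controller state. setpoint and deadband are plain percentages,
 * the gains are fixed point fractions (percent / 100), integral
 * accumulates the fixed point error, and last_err keeps the previous
 * error for the derivative term.
 */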
struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	char name[64];

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	u64	prev_aperf;
	u64	prev_mperf;
	struct sample sample;
};

static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

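/*
 * CPU family specific callbacks for reading the P state range,
 * programming a P state and (on parts that need it) reading the
 * voltage IDs.
 */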
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;

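/*
 * Global performance limits shared by sysfs and the cpufreq policy.
 * The *_pct fields are integer percentages; max_perf and min_perf hold
 * the same limits converted to fixed point. max_perf_pct is always the
 * smaller of the policy limit and the sysfs limit.
 */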
struct perf_limits {
	int no_turbo;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			int deadband, int integral) {
	pid->setpoint = setpoint;
	pid->deadband  = deadband;
	pid->integral  = int_tofp(integral);
	pid->last_err  = int_tofp(setpoint) - int_tofp(busy);
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

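/*
 * Compute the PID output for the current (fixed point) busy value.
 * Errors within the deadband produce no adjustment, the integral term
 * is clamped to +/-30 to limit windup, and the result is rounded to
 * the nearest whole number of P state steps.
 */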
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	if (result >= 0)
		result = result + (1 << (FRAC_BITS-1));
	else
		result = result - (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid,
		pid_params.setpoint,
		100,
		pid_params.deadband,
		0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}
static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
			pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				debugfs_parent, pid_files[i].value,
				&fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.no_turbo = clamp_t(int, input, 0, 1);

	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.min_perf_pct = clamp_t(int, input, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	return count;
}

show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static struct kobject *intel_pstate_kobject;

static void intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	BUG_ON(rc);
}

/************************** sysfs end ************************/
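
/*
 * BayTrail specific callbacks. The ratio and voltage ranges come from
 * the BYT_* MSRs defined above rather than MSR_PLATFORM_INFO, and
 * selecting a P state also has to program a matching voltage ID (VID).
 */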
static int byt_get_min_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x3F;
}

static int byt_get_max_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x3F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;
	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x3F;
}

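/*
 * Program a P state on BayTrail: put the ratio in bits 15:8 of
 * PERF_CTL, set the turbo disable bit when turbo is not allowed, and
 * pick a VID by interpolating between vid.min and vid.max according to
 * how far the ratio sits above the minimum. Ratios above the non-turbo
 * maximum use the dedicated turbo VID.
 */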
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = fp_toint(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;


	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}


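/*
 * Callbacks for the Core parts in the ID table below: the minimum and
 * maximum non-turbo ratios come from MSR_PLATFORM_INFO (bits 47:40 and
 * 15:8 as read here), the turbo limit from MSR_NHM_TURBO_RATIO_LIMIT.
 */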
static int core_get_min_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

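/*
 * Bits 7:0 of MSR_NHM_TURBO_RATIO_LIMIT hold the 1-core turbo ratio;
 * fall back to the non-turbo maximum if the reported value is not
 * higher.
 */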
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;
	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_vid = byt_get_vid,
	},
};


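/*
 * Translate the global percentage limits into an allowed P state range
 * for this CPU. The ceiling is the turbo P state unless turbo is
 * disabled, and both bounds are clamped to the hardware range.
 */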
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;
	if (limits.no_turbo)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf,
			cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * 100000, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate + steps;

	intel_pstate_set_pstate(cpu, target);
}

static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
	int target;
	target = cpu->pstate.current_pstate - steps;
	intel_pstate_set_pstate(cpu, target);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	sprintf(cpu->name, "Intel 2nd generation core");

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu,
					struct sample *sample)
{
	int32_t core_pct;

	core_pct = div_fp(int_tofp((sample->aperf)),
			int_tofp((sample->mperf)));
	core_pct = mul_fp(core_pct, int_tofp(100));

	sample->freq = fp_toint(
		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));

	sample->core_pct_busy = core_pct;
}

static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	aperf = aperf >> FRAC_BITS;
	mperf = mperf >> FRAC_BITS;

	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu, &cpu->sample);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int sample_time, delay;

	sample_time = pid_params.sample_rate_ms;
	delay = msecs_to_jiffies(sample_time);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

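/*
 * Scale the sampled busy percentage by max_pstate/current_pstate so
 * the value fed to the PID is normalized to the maximum non-turbo
 * P state rather than to whatever P state was in effect while
 * sampling.
 */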
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate;

	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
	return core_busy;
}

static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl = 0;
	int steps;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	steps = abs(ctl);

	if (ctl < 0)
		intel_pstate_pstate_increase(cpu, steps);
	else
		intel_pstate_pstate_decrease(cpu, steps);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

static int intel_pstate_init_cpu(unsigned int cpunum)
{
	const struct x86_cpu_id *id;
	struct cpudata *cpu;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	intel_pstate_get_cpu_pstates(cpu);

	cpu->cpu = cpunum;

	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}
	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
		(policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}

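/*
 * Stop managing a CPU: kill its sampling timer, drop it back to the
 * minimum P state and free its per-CPU data.
 */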
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
	kfree(all_cpu_data[cpu_num]);
	all_cpu_data[cpu_num] = NULL;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (!limits.no_turbo &&
		limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * 100000;
	policy->max = cpu->pstate.turbo_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};

static int __initdata no_load;

static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the msr's we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
		!pstate_funcs.get_min() ||
		!pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max   = funcs->get_max;
	pstate_funcs.get_min   = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.set       = funcs->set;
	pstate_funcs.get_vid   = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant"},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;

	if (acpi_disabled
	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
		    && intel_pstate_no_acpi_pss())
			return true;
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */

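/*
 * Driver init: bail out if loading was disabled on the command line,
 * if the CPU is not in the ID table, or if the platform firmware
 * provides its own power management (a matching vendor table with no
 * _PSS objects). Otherwise copy the per-family callbacks and PID
 * tuning, sanity check the MSRs and register the cpufreq driver.
 */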
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

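/* Handle the "intel_pstate=disable" kernel command line option. */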
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);

MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
MODULE_LICENSE("GPL");