/*
 * POWERNV cpufreq driver for the IBM POWER processors
 *
 * (C) Copyright IBM 2014
 *
 * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt)	"powernv-cpufreq: " fmt

#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/smp.h>
#include <linux/of.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <trace/events/power.h>

#include <asm/cputhreads.h>
#include <asm/firmware.h>
#include <asm/reg.h>
#include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
#include <asm/opal.h>

#define POWERNV_MAX_PSTATES	256
#define PMSR_PSAFE_ENABLE	(1UL << 30)
#define PMSR_SPR_EM_DISABLE	(1UL << 31)
#define PMSR_MAX(x)		((x >> 32) & 0xFF)
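
/*
 * Worked example (illustrative value): pstates are signed 8-bit
 * quantities, so for pmsr = 0x000000fa00000000, PMSR_MAX() yields 0xfa,
 * which callers cast to (s8) to recover the pstate -6.
 */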

#define MAX_RAMP_DOWN_TIME				5120
/*
 * On an idle system we want the global pstate to ramp down from the max
 * value to the min over a span of ~5 seconds. We also want it to ramp down
 * slowly at first and more rapidly later on.
 *
 * This gives a percentage rampdown for time elapsed in milliseconds.
 * ramp_down_percentage = ((ms * ms) >> 18)
 *			~= 3.8 * (sec * sec)
 *
 * At 0 ms	ramp_down_percent = 0
 * At 5120 ms	ramp_down_percent = 100
 */
#define ramp_down_percent(time)		((time * time) >> 18)
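
/*
 * Worked example: halfway through the window, at 2560 ms, the macro
 * yields (2560 * 2560) >> 18 = 25, i.e. only 25% of the ramp-down has
 * happened; the remaining 75% happens in the second half.
 */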

/* Interval after which the timer is queued to bring down global pstate */
#define GPSTATE_TIMER_INTERVAL				2000

/**
 * struct global_pstate_info -	Per policy data structure to maintain history of
 *				global pstates
 * @highest_lpstate:		The local pstate from which we are ramping down
 * @elapsed_time:		Time in ms spent in ramping down from
 *				highest_lpstate
 * @last_sampled_time:		Time from boot in ms when global pstates were
 *				last set
 * @last_lpstate,last_gpstate:	Last set values for local and global pstates
 * @timer:			Timer used for ramping down if the cpu goes
 *				idle for a long time with the global pstate
 *				held high
 * @gpstate_lock:		A spinlock to maintain synchronization between
 *				routines called by the timer handler and
 *				governor's target_index calls
 */
struct global_pstate_info {
	int highest_lpstate;
	unsigned int elapsed_time;
	unsigned int last_sampled_time;
	int last_lpstate;
	int last_gpstate;
	spinlock_t gpstate_lock;
	struct timer_list timer;
};

static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting, throttled, occ_reset;

static const char * const throttle_reason[] = {
	"No throttling",
	"Power Cap",
	"Processor Over Temperature",
	"Power Supply Failure",
	"Over Current",
	"OCC Reset"
};

enum throttle_reason_type {
	NO_THROTTLE = 0,
	POWERCAP,
	CPU_OVERTEMP,
	POWER_SUPPLY_FAILURE,
	OVERCURRENT,
	OCC_RESET_THROTTLE,
	OCC_MAX_REASON
};

static struct chip {
	unsigned int id;		/* chip id from the device tree */
	bool throttled;			/* pmax capping currently active */
	bool restore;			/* restore frequency on unthrottling */
	u8 throttle_reason;		/* current reason, indexes throttle_reason[] */
	cpumask_t mask;			/* cpus belonging to this chip */
	struct work_struct throttle;	/* work to re-check/restore on notification */
	int throttle_turbo;		/* pmax cappings within turbo range */
	int throttle_sub_turbo;		/* pmax cappings below nominal */
	int reason[OCC_MAX_REASON];	/* per-reason throttle event counts */
} *chips;

static int nr_chips;
static DEFINE_PER_CPU(struct chip *, chip_info);

/*
 * Note: The set of pstates consists of contiguous integers, the
 * smallest of which is indicated by powernv_pstate_info.min, the
 * largest of which is indicated by powernv_pstate_info.max.
 *
 * The nominal pstate is the highest non-turbo pstate in this
 * platform. This is indicated by powernv_pstate_info.nominal.
 */
static struct powernv_pstate_info {
	int min;
	int max;
	int nominal;
	int nr_pstates;
} powernv_pstate_info;
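
/*
 * Illustrative numbering (made-up values): with max = 0, nominal = -1 and
 * min = -7 there are 8 pstates, 0 being the fastest. Assuming firmware
 * lists them in descending order, as pstate_id_to_freq() below expects,
 * pstate id p lives at powernv_freqs[max - p]: index 0 holds pstate 0,
 * index 7 holds pstate -7.
 */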

static inline void reset_gpstates(struct cpufreq_policy *policy)
{
	struct global_pstate_info *gpstates = policy->driver_data;

	gpstates->highest_lpstate = 0;
	gpstates->elapsed_time = 0;
	gpstates->last_sampled_time = 0;
	gpstates->last_lpstate = 0;
	gpstates->last_gpstate = 0;
}

/*
 * Initialize the freq table based on data obtained
 * from the firmware passed via device-tree
 */
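/*
 * The firmware properties parsed below look roughly like this (a
 * hypothetical fragment; real values vary by machine):
 *
 *	power-mgt {
 *		ibm,pstate-min = <0xfffffff9>;		// pstate -7
 *		ibm,pstate-max = <0x0>;			// pstate 0
 *		ibm,pstate-nominal = <0xffffffff>;	// pstate -1
 *		ibm,pstate-ids = <0x0 0xffffffff ... 0xfffffff9>;
 *		ibm,pstate-frequencies-mhz = <3325 3291 ... 2061>;
 *	};
 */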
static int init_powernv_pstates(void)
{
	struct device_node *power_mgt;
	int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
	const __be32 *pstate_ids, *pstate_freqs;
	u32 len_ids, len_freqs;

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("power-mgt node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
		pr_warn("ibm,pstate-min node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
		pr_warn("ibm,pstate-max node not found\n");
		return -ENODEV;
	}

	if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
				 &pstate_nominal)) {
		pr_warn("ibm,pstate-nominal not found\n");
		return -ENODEV;
	}
	pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
		pstate_nominal, pstate_max);

	pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
	if (!pstate_ids) {
		pr_warn("ibm,pstate-ids not found\n");
		return -ENODEV;
	}

	pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
				      &len_freqs);
	if (!pstate_freqs) {
		pr_warn("ibm,pstate-frequencies-mhz not found\n");
		return -ENODEV;
	}

	if (len_ids != len_freqs)
		pr_warn("Entries in ibm,pstate-ids and ibm,pstate-frequencies-mhz do not match\n");

	nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
	if (!nr_pstates) {
		pr_warn("No PStates found\n");
		return -ENODEV;
	}

	pr_debug("NR PStates %d\n", nr_pstates);
	for (i = 0; i < nr_pstates; i++) {
		u32 id = be32_to_cpu(pstate_ids[i]);
		u32 freq = be32_to_cpu(pstate_freqs[i]);

		pr_debug("PState id %d freq %d MHz\n", id, freq);
		powernv_freqs[i].frequency = freq * 1000; /* kHz */
		powernv_freqs[i].driver_data = id;
	}
	/* End of list marker entry */
	powernv_freqs[i].frequency = CPUFREQ_TABLE_END;

	powernv_pstate_info.min = pstate_min;
	powernv_pstate_info.max = pstate_max;
	powernv_pstate_info.nominal = pstate_nominal;
	powernv_pstate_info.nr_pstates = nr_pstates;

	return 0;
}

/* Returns the CPU frequency corresponding to the pstate_id. */
static unsigned int pstate_id_to_freq(int pstate_id)
{
	int i;

	i = powernv_pstate_info.max - pstate_id;
	if (i >= powernv_pstate_info.nr_pstates || i < 0) {
		pr_warn("PState id %d outside of PState table, "
			"reporting nominal id %d instead\n",
			pstate_id, powernv_pstate_info.nominal);
		i = powernv_pstate_info.max - powernv_pstate_info.nominal;
	}

	return powernv_freqs[i].frequency;
}

/*
 * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
 * the firmware
 */
static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
					char *buf)
{
	return sprintf(buf, "%u\n",
		pstate_id_to_freq(powernv_pstate_info.nominal));
}

struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
	__ATTR_RO(cpuinfo_nominal_freq);

static struct freq_attr *powernv_cpu_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_cpuinfo_nominal_freq,
	NULL,
};

#define throttle_attr(name, member)					\
static ssize_t name##_show(struct cpufreq_policy *policy, char *buf)	\
{									\
	struct chip *chip = per_cpu(chip_info, policy->cpu);		\
									\
	return sprintf(buf, "%u\n", chip->member);			\
}									\
									\
static struct freq_attr throttle_attr_##name = __ATTR_RO(name)
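
/*
 * For example, throttle_attr(powercap, reason[POWERCAP]) expands to a
 * powercap_show() routine returning chip->reason[POWERCAP] and to a
 * read-only attribute throttle_attr_powercap. Grouped below, these
 * appear under the policy's sysfs directory, typically
 * /sys/devices/system/cpu/cpuN/cpufreq/throttle_stats/.
 */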

throttle_attr(unthrottle, reason[NO_THROTTLE]);
throttle_attr(powercap, reason[POWERCAP]);
throttle_attr(overtemp, reason[CPU_OVERTEMP]);
throttle_attr(supply_fault, reason[POWER_SUPPLY_FAILURE]);
throttle_attr(overcurrent, reason[OVERCURRENT]);
throttle_attr(occ_reset, reason[OCC_RESET_THROTTLE]);
throttle_attr(turbo_stat, throttle_turbo);
throttle_attr(sub_turbo_stat, throttle_sub_turbo);

static struct attribute *throttle_attrs[] = {
	&throttle_attr_unthrottle.attr,
	&throttle_attr_powercap.attr,
	&throttle_attr_overtemp.attr,
	&throttle_attr_supply_fault.attr,
	&throttle_attr_overcurrent.attr,
	&throttle_attr_occ_reset.attr,
	&throttle_attr_turbo_stat.attr,
	&throttle_attr_sub_turbo_stat.attr,
	NULL,
};

static const struct attribute_group throttle_attr_grp = {
	.name	= "throttle_stats",
	.attrs	= throttle_attrs,
};

/* Helper routines */

/* Access helpers to power mgt SPR */

static inline unsigned long get_pmspr(unsigned long sprn)
{
	switch (sprn) {
	case SPRN_PMCR:
		return mfspr(SPRN_PMCR);

	case SPRN_PMICR:
		return mfspr(SPRN_PMICR);

	case SPRN_PMSR:
		return mfspr(SPRN_PMSR);
	}
	BUG();
}

static inline void set_pmspr(unsigned long sprn, unsigned long val)
{
	switch (sprn) {
	case SPRN_PMCR:
		mtspr(SPRN_PMCR, val);
		return;

	case SPRN_PMICR:
		mtspr(SPRN_PMICR, val);
		return;
	}
	BUG();
}

/*
 * Use objects of this type to query/update
 * pstates on a remote CPU via smp_call_function.
 */
struct powernv_smp_call_data {
	unsigned int freq;
	int pstate_id;
	int gpstate_id;
};

/*
 * powernv_read_cpu_freq: Reads the current frequency on this CPU.
 *
 * Called via smp_call_function.
 *
 * Note: The caller of the smp_call_function should pass an argument of
 * the type 'struct powernv_smp_call_data *' along with this function.
 *
 * The current frequency on this CPU will be returned via
 * ((struct powernv_smp_call_data *)arg)->freq;
 */
static void powernv_read_cpu_freq(void *arg)
{
	unsigned long pmspr_val;
	s8 local_pstate_id;
	struct powernv_smp_call_data *freq_data = arg;

	pmspr_val = get_pmspr(SPRN_PMSR);

	/*
	 * The local pstate id corresponds to bits 48..55 in the PMSR.
	 * Note: Watch out for the sign!
	 */
	local_pstate_id = (pmspr_val >> 48) & 0xFF;
	freq_data->pstate_id = local_pstate_id;
	freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);

	pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
		raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
		freq_data->freq);
}

/*
 * powernv_cpufreq_get: Returns the CPU frequency as reported by the
 * firmware for CPU 'cpu'. This value is reported through the sysfs
 * file cpuinfo_cur_freq.
 */
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
	struct powernv_smp_call_data freq_data;

	smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
			&freq_data, 1);

	return freq_data.freq;
}

/*
 * set_pstate: Sets the pstate on this CPU.
 *
 * This is called via an smp_call_function.
 *
 * The caller must ensure that freq_data is of the type
 * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
 * on this CPU should be present in freq_data->pstate_id.
 */
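/*
 * Worked example (illustrative values): requesting local pstate -2 and
 * global pstate -1 gives pstate_ul = 0xfe and gpstate_ul = 0xff below,
 * so the top 16 bits of the PMCR become 0xfffe while the lower 48 bits
 * are preserved.
 */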
static void set_pstate(void *data)
{
	unsigned long val;
	struct powernv_smp_call_data *freq_data = data;
	unsigned long pstate_ul = freq_data->pstate_id;
	unsigned long gpstate_ul = freq_data->gpstate_id;

	val = get_pmspr(SPRN_PMCR);
	val = val & 0x0000FFFFFFFFFFFFULL;

	pstate_ul = pstate_ul & 0xFF;
	gpstate_ul = gpstate_ul & 0xFF;

	/* Set both global(bits 56..63) and local(bits 48..55) PStates */
	val = val | (gpstate_ul << 56) | (pstate_ul << 48);

	pr_debug("Setting cpu %d pmcr to %016lX\n",
			raw_smp_processor_id(), val);
	set_pmspr(SPRN_PMCR, val);
}

/*
 * get_nominal_index: Returns the index corresponding to the nominal
 * pstate in the cpufreq table
 */
static inline unsigned int get_nominal_index(void)
{
	return powernv_pstate_info.max - powernv_pstate_info.nominal;
}

static void powernv_cpufreq_throttle_check(void *data)
{
	struct chip *chip;
	unsigned int cpu = smp_processor_id();
	unsigned long pmsr;
	int pmsr_pmax;

	pmsr = get_pmspr(SPRN_PMSR);
	chip = this_cpu_read(chip_info);

	/* Check for Pmax Capping */
	pmsr_pmax = (s8)PMSR_MAX(pmsr);
	if (pmsr_pmax != powernv_pstate_info.max) {
		if (chip->throttled)
			goto next;
		chip->throttled = true;
		if (pmsr_pmax < powernv_pstate_info.nominal) {
			pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
				     cpu, chip->id, pmsr_pmax,
				     powernv_pstate_info.nominal);
			chip->throttle_sub_turbo++;
		} else {
			chip->throttle_turbo++;
		}
		trace_powernv_throttle(chip->id,
				      throttle_reason[chip->throttle_reason],
				      pmsr_pmax);
	} else if (chip->throttled) {
		chip->throttled = false;
		trace_powernv_throttle(chip->id,
				      throttle_reason[chip->throttle_reason],
				      pmsr_pmax);
	}

	/* Check if Psafe_mode_active is set in PMSR. */
next:
	if (pmsr & PMSR_PSAFE_ENABLE) {
		throttled = true;
		pr_info("Pstate set to safe frequency\n");
	}

	/* Check if SPR_EM_DISABLE is set in PMSR */
	if (pmsr & PMSR_SPR_EM_DISABLE) {
		throttled = true;
		pr_info("Frequency Control disabled from OS\n");
	}

	if (throttled) {
		pr_info("PMSR = %16lx\n", pmsr);
		pr_warn("CPU Frequency could be throttled\n");
	}
}

/**
 * calc_global_pstate - Calculate global pstate
 * @elapsed_time:	Elapsed time in milliseconds
 * @local_pstate:	New local pstate
 * @highest_lpstate:	pstate from which it is ramping down
 *
 * Finds the appropriate global pstate based on the pstate from which it is
 * ramping down and the time elapsed in ramping down. It follows a quadratic
 * equation which ensures that it ramps down to pmin in ~5 seconds.
 */
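/*
 * Worked example (made-up values): with highest_lpstate = 0,
 * powernv_pstate_info.min = -32 and local_pstate = -32, at
 * elapsed_time = 2560 ms ramp_down_percent() is 25, so
 * pstate_diff = (25 * 32) / 100 = 8 and the returned global pstate
 * is -8, even though the local pstate is already -32.
 */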
static inline int calc_global_pstate(unsigned int elapsed_time,
				     int highest_lpstate, int local_pstate)
{
	int pstate_diff;

	/*
	 * ramp_down_percent gives the percentage of the ramp-down we expect
	 * to have completed by now. The difference between highest_lpstate
	 * and powernv_pstate_info.min is the absolute number of pstates we
	 * will eventually drop by the end of 5 seconds; scale it to get the
	 * number of pstates to drop so far.
	 */
	pstate_diff =  ((int)ramp_down_percent(elapsed_time) *
			(highest_lpstate - powernv_pstate_info.min)) / 100;

	/* Ensure that the global pstate is >= the local pstate */
	if (highest_lpstate - pstate_diff < local_pstate)
		return local_pstate;
	else
		return highest_lpstate - pstate_diff;
}

static inline void  queue_gpstate_timer(struct global_pstate_info *gpstates)
{
	unsigned int timer_interval;

	/*
	 * Set the timer to fire after GPSTATE_TIMER_INTERVAL ms, but if
	 * that would push the total ramp-down time past MAX_RAMP_DOWN_TIME
	 * ms, set it so that it fires exactly at MAX_RAMP_DOWN_TIME ms of
	 * ramp-down time. E.g. with elapsed_time = 4000 ms the interval is
	 * clamped to 5120 - 4000 = 1120 ms instead of 2000 ms.
	 */
	if ((gpstates->elapsed_time + GPSTATE_TIMER_INTERVAL)
	     > MAX_RAMP_DOWN_TIME)
		timer_interval = MAX_RAMP_DOWN_TIME - gpstates->elapsed_time;
	else
		timer_interval = GPSTATE_TIMER_INTERVAL;

	mod_timer_pinned(&gpstates->timer, jiffies +
			msecs_to_jiffies(timer_interval));
}

/**
 * gpstate_timer_handler
 *
 * @data: pointer to cpufreq_policy on which timer was queued
 *
 * This handler brings the global pstate closer to the local pstate
 * according to the quadratic equation, and queues a new timer if the global
 * pstate is still not equal to the local pstate.
 */
void gpstate_timer_handler(unsigned long data)
{
	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
	struct global_pstate_info *gpstates = policy->driver_data;
	int gpstate_id;
	unsigned int time_diff = jiffies_to_msecs(jiffies)
					- gpstates->last_sampled_time;
	struct powernv_smp_call_data freq_data;

	if (!spin_trylock(&gpstates->gpstate_lock))
		return;

	gpstates->last_sampled_time += time_diff;
	gpstates->elapsed_time += time_diff;
	freq_data.pstate_id = gpstates->last_lpstate;

	if ((gpstates->last_gpstate == freq_data.pstate_id) ||
	    (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME)) {
		gpstate_id = freq_data.pstate_id;
		reset_gpstates(policy);
		gpstates->highest_lpstate = freq_data.pstate_id;
	} else {
		gpstate_id = calc_global_pstate(gpstates->elapsed_time,
						gpstates->highest_lpstate,
						freq_data.pstate_id);
	}

	/*
	 * If the global pstate is equal to the local pstate, the ramp down
	 * is over, so the timer need not be queued again.
	 */
	if (gpstate_id != freq_data.pstate_id)
		queue_gpstate_timer(gpstates);

	freq_data.gpstate_id = gpstate_id;
	gpstates->last_gpstate = freq_data.gpstate_id;
	gpstates->last_lpstate = freq_data.pstate_id;

	spin_unlock(&gpstates->gpstate_lock);

	/* Timer may get migrated to a different cpu on cpu hot unplug */
	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
}

/*
 * powernv_cpufreq_target_index: Sets the frequency corresponding to
 * the cpufreq table entry indexed by new_index on the cpus in the
 * mask policy->cpus
 */
static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
					unsigned int new_index)
{
	struct powernv_smp_call_data freq_data;
	unsigned int cur_msec, gpstate_id;
	struct global_pstate_info *gpstates = policy->driver_data;

	if (unlikely(rebooting) && new_index != get_nominal_index())
		return 0;

	if (!throttled)
		powernv_cpufreq_throttle_check(NULL);

	cur_msec = jiffies_to_msecs(get_jiffies_64());

	spin_lock(&gpstates->gpstate_lock);
	freq_data.pstate_id = powernv_freqs[new_index].driver_data;

	if (!gpstates->last_sampled_time) {
		gpstate_id = freq_data.pstate_id;
		gpstates->highest_lpstate = freq_data.pstate_id;
		goto gpstates_done;
	}

	if (gpstates->last_gpstate > freq_data.pstate_id) {
		gpstates->elapsed_time += cur_msec -
						 gpstates->last_sampled_time;

		/*
		 * If it has been ramping down for more than MAX_RAMP_DOWN_TIME
		 * we should reset all global pstate related data. Set it
		 * equal to the local pstate to start fresh.
		 */
		if (gpstates->elapsed_time > MAX_RAMP_DOWN_TIME) {
			reset_gpstates(policy);
			gpstates->highest_lpstate = freq_data.pstate_id;
			gpstate_id = freq_data.pstate_id;
		} else {
			/* elapsed_time is less than 5 seconds, continue to ramp down */
			gpstate_id = calc_global_pstate(gpstates->elapsed_time,
							gpstates->highest_lpstate,
							freq_data.pstate_id);
		}
	} else {
		reset_gpstates(policy);
		gpstates->highest_lpstate = freq_data.pstate_id;
		gpstate_id = freq_data.pstate_id;
	}

	/*
	 * If the global pstate is equal to the local pstate, the ramp down
	 * is over, so the timer need not be queued again.
	 */
	if (gpstate_id != freq_data.pstate_id)
		queue_gpstate_timer(gpstates);
	else
		del_timer_sync(&gpstates->timer);

gpstates_done:
	freq_data.gpstate_id = gpstate_id;
	gpstates->last_sampled_time = cur_msec;
	gpstates->last_gpstate = freq_data.gpstate_id;
	gpstates->last_lpstate = freq_data.pstate_id;

	spin_unlock(&gpstates->gpstate_lock);

	/*
	 * Use smp_call_function to send IPI and execute the
	 * mtspr on the target CPU. We could do that without an IPI
	 * if the current CPU is within policy->cpus (i.e. the same core)
	 */
	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
	return 0;
}

static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	int base, i, ret;
	struct kernfs_node *kn;
	struct global_pstate_info *gpstates;

	base = cpu_first_thread_sibling(policy->cpu);

	for (i = 0; i < threads_per_core; i++)
		cpumask_set_cpu(base + i, policy->cpus);

	kn = kernfs_find_and_get(policy->kobj.sd, throttle_attr_grp.name);
	if (!kn) {
		ret = sysfs_create_group(&policy->kobj, &throttle_attr_grp);
		if (ret) {
			pr_info("Failed to create throttle stats directory for cpu %d\n",
				policy->cpu);
			return ret;
		}
	} else {
		kernfs_put(kn);
	}

	gpstates = kzalloc(sizeof(*gpstates), GFP_KERNEL);
	if (!gpstates)
		return -ENOMEM;

	policy->driver_data = gpstates;

	/* initialize timer */
	init_timer_deferrable(&gpstates->timer);
	gpstates->timer.data = (unsigned long)policy;
	gpstates->timer.function = gpstate_timer_handler;
	gpstates->timer.expires = jiffies +
				msecs_to_jiffies(GPSTATE_TIMER_INTERVAL);
	spin_lock_init(&gpstates->gpstate_lock);
	ret = cpufreq_table_validate_and_show(policy, powernv_freqs);

	if (ret < 0)
		kfree(policy->driver_data);

	return ret;
}

static int powernv_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	/* timer is deleted in cpufreq_cpu_stop() */
	kfree(policy->driver_data);

	return 0;
}

static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	int cpu;
	struct cpufreq_policy cpu_policy;

	rebooting = true;
	for_each_online_cpu(cpu) {
		cpufreq_get_policy(&cpu_policy, cpu);
		powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
	}

	return NOTIFY_DONE;
}

static struct notifier_block powernv_cpufreq_reboot_nb = {
	.notifier_call = powernv_cpufreq_reboot_notifier,
};

void powernv_cpufreq_work_fn(struct work_struct *work)
{
	struct chip *chip = container_of(work, struct chip, throttle);
	unsigned int cpu;
	cpumask_t mask;

	get_online_cpus();
	cpumask_and(&mask, &chip->mask, cpu_online_mask);
	smp_call_function_any(&mask,
			      powernv_cpufreq_throttle_check, NULL, 0);

	if (!chip->restore)
		goto out;

	chip->restore = false;
	for_each_cpu(cpu, &mask) {
		int index;
		struct cpufreq_policy policy;

		cpufreq_get_policy(&policy, cpu);
		index = cpufreq_frequency_table_target(&policy, policy.cur,
						       CPUFREQ_RELATION_C);
		powernv_cpufreq_target_index(&policy, index);
		cpumask_andnot(&mask, &mask, policy.cpus);
	}
out:
	put_online_cpus();
}

static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
				   unsigned long msg_type, void *_msg)
{
	struct opal_msg *msg = _msg;
	struct opal_occ_msg omsg;
	int i;

	if (msg_type != OPAL_MSG_OCC)
		return 0;

	omsg.type = be64_to_cpu(msg->params[0]);

	switch (omsg.type) {
	case OCC_RESET:
		occ_reset = true;
		pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
		/*
		 * powernv_cpufreq_throttle_check() is called in the
		 * target() callback, which can detect the throttle state
		 * for governors like ondemand. Static governors, however,
		 * will not call target() often, so report throttling here.
		 */
		if (!throttled) {
			throttled = true;
			pr_warn("CPU frequency is throttled for duration\n");
		}

		break;
	case OCC_LOAD:
		pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
		break;
	case OCC_THROTTLE:
		omsg.chip = be64_to_cpu(msg->params[1]);
		omsg.throttle_status = be64_to_cpu(msg->params[2]);

		if (occ_reset) {
			occ_reset = false;
			throttled = false;
			pr_info("OCC Active, CPU frequency is no longer throttled\n");

			for (i = 0; i < nr_chips; i++) {
				chips[i].restore = true;
				schedule_work(&chips[i].throttle);
			}

			return 0;
		}

		for (i = 0; i < nr_chips; i++)
			if (chips[i].id == omsg.chip)
				break;

		if (omsg.throttle_status >= 0 &&
		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS) {
			chips[i].throttle_reason = omsg.throttle_status;
			chips[i].reason[omsg.throttle_status]++;
		}

		if (!omsg.throttle_status)
			chips[i].restore = true;

		schedule_work(&chips[i].throttle);
	}
	return 0;
}

static struct notifier_block powernv_cpufreq_opal_nb = {
	.notifier_call	= powernv_cpufreq_occ_msg,
	.next		= NULL,
	.priority	= 0,
};

static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
	struct powernv_smp_call_data freq_data;
	struct global_pstate_info *gpstates = policy->driver_data;

	freq_data.pstate_id = powernv_pstate_info.min;
	freq_data.gpstate_id = powernv_pstate_info.min;
	smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
	del_timer_sync(&gpstates->timer);
}

static struct cpufreq_driver powernv_cpufreq_driver = {
	.name		= "powernv-cpufreq",
	.flags		= CPUFREQ_CONST_LOOPS,
	.init		= powernv_cpufreq_cpu_init,
	.exit		= powernv_cpufreq_cpu_exit,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= powernv_cpufreq_target_index,
	.get		= powernv_cpufreq_get,
	.stop_cpu	= powernv_cpufreq_stop_cpu,
	.attr		= powernv_cpu_freq_attr,
};

static int init_chip_info(void)
{
	unsigned int chip[256];
	unsigned int cpu, i;
	unsigned int prev_chip_id = UINT_MAX;

	for_each_possible_cpu(cpu) {
		unsigned int id = cpu_to_chip_id(cpu);

		if (prev_chip_id != id) {
			prev_chip_id = id;
			chip[nr_chips++] = id;
		}
	}

	chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
	if (!chips)
		return -ENOMEM;

	for (i = 0; i < nr_chips; i++) {
		chips[i].id = chip[i];
		cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
		INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
		for_each_cpu(cpu, &chips[i].mask)
			per_cpu(chip_info, cpu) = &chips[i];
	}

	return 0;
}
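
/*
 * Illustrative walk-through (made-up topology): on a two-socket system
 * with chip ids 0 and 8 and cpus 0..79 split evenly between them, the
 * loop above records chip[] = {0, 8} and nr_chips = 2. This relies on
 * cpus of the same chip being contiguous in the possible mask, since
 * only transitions in cpu_to_chip_id() are recorded.
 */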

static inline void clean_chip_info(void)
{
	kfree(chips);
}

static inline void unregister_all_notifiers(void)
{
	opal_message_notifier_unregister(OPAL_MSG_OCC,
					 &powernv_cpufreq_opal_nb);
	unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
}

static int __init powernv_cpufreq_init(void)
{
	int rc = 0;

	/* Don't probe on pseries (guest) platforms */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return -ENODEV;

	/* Discover pstates from device tree and init */
	rc = init_powernv_pstates();
	if (rc)
		goto out;

	/* Populate chip info */
	rc = init_chip_info();
	if (rc)
		goto out;

	register_reboot_notifier(&powernv_cpufreq_reboot_nb);
	opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);

	rc = cpufreq_register_driver(&powernv_cpufreq_driver);
	if (!rc)
		return 0;

	pr_info("Failed to register the cpufreq driver (%d)\n", rc);
	unregister_all_notifiers();
	clean_chip_info();
out:
	pr_info("Platform driver disabled. System does not support PState control\n");
	return rc;
}
module_init(powernv_cpufreq_init);

static void __exit powernv_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&powernv_cpufreq_driver);
	unregister_all_notifiers();
	clean_chip_info();
}
module_exit(powernv_cpufreq_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");