/*
 * ARM big.LITTLE Platforms CPUFreq support
 *
 * Copyright (C) 2013 ARM Ltd.
 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>

#include "arm_big_little.h"

/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

#ifdef CONFIG_BL_SWITCHER
#include <asm/bL_switcher.h>
static bool bL_switching_enabled;
#define is_bL_switching_enabled()	bL_switching_enabled
#define set_switching_enabled(x)	(bL_switching_enabled = (x))
#else
#define is_bL_switching_enabled()	false
#define set_switching_enabled(x)	do { } while (0)
#define bL_switch_request(...)		do { } while (0)
#define bL_switcher_put_enabled()	do { } while (0)
#define bL_switcher_get_enabled()	do { } while (0)
#endif

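/*
 * With the switcher active, A7 frequencies are virtualised at half their
 * actual rate, reflecting the LITTLE cluster's roughly halved performance
 * per MHz relative to the A15s.
 */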
#define ACTUAL_FREQ(cluster, freq)  (((cluster) == A7_CLUSTER) ? (freq) << 1 : (freq))
#define VIRT_FREQ(cluster, freq)    (((cluster) == A7_CLUSTER) ? (freq) >> 1 : (freq))

static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;	/* Minimum clock frequency (big) */
static unsigned int clk_little_max;	/* Maximum clock frequency (LITTLE) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];

static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

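/*
 * With the in-kernel switcher (IKS) enabled, all CPUs belong to a single
 * virtual cluster (index MAX_CLUSTERS) spanning both physical clusters.
 */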
static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}

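/* Highest frequency requested by any online CPU currently on @cluster */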
static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if ((cluster == per_cpu(physical_cluster, j)) &&
				(max_freq < cpu_freq))
			max_freq = cpu_freq;
	}

	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
			max_freq);

	return max_freq;
}

static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
			cur_cluster, rate);

	return rate;
}

static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled()) {
		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
					cpu));

		return per_cpu(cpu_last_req_freq, cpu);
	} else {
		return clk_get_cpu_rate(cpu);
	}
}

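/*
 * Set the clock of the target physical cluster. In IKS mode the cluster
 * must run at the highest rate requested by any CPU on it, so this CPU's
 * request is recorded first and the cluster maximum recomputed.
 */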
static int
bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
			__func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (!ret) {
		/*
		 * FIXME: clk_set_rate hasn't returned an error here however it
		 * may be that clk_change_rate failed due to hardware or
		 * firmware issues and wasn't able to report that due to the
		 * current design of the clk core layer. To work around this
		 * problem we will read back the clock rate and check it is
		 * correct. This needs to be removed once clk core is fixed.
		 */
		if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
			ret = -EIO;
	}

	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
				new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
				__func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
					__func__, old_cluster, new_rate);

			if (clk_set_rate(clk[old_cluster], new_rate * 1000))
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
						__func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}

/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
		unsigned int index)
{
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	unsigned int freqs_new;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs_new = freq_table[cur_cluster][index].frequency;

	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
				(freqs_new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
				(freqs_new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	return bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs_new);
}

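/* Count the entries in a cpufreq_frequency_table, excluding the terminator */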
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t min_freq = ~0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;
	return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	uint32_t max_freq = 0;

	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;
	return max_freq;
}

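/*
 * Build the virtual cluster's table: both physical tables are combined
 * (A7 frequencies virtualised) into one table in increasing order.
 */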
static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kcalloc(count, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
				j++) {
			table[k].frequency = VIRT_FREQ(i,
					freq_table[i][j].frequency);
			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
					table[k].frequency);
			k++;
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);

	return 0;
}

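/* Drop the clock and OPP-backed frequency table of one physical cluster */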
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
					    const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpumask);
	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}

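/*
 * Refcounted release: only the last user of a cluster frees its resources.
 * For the virtual cluster both physical clusters are torn down and the
 * merged table is freed.
 */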
static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}

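/*
 * Initialise one physical cluster: build its OPP-based frequency table
 * and take a reference on its clock.
 */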
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
					   const struct cpumask *cpumask)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	int ret;

	if (freq_table[cluster])
		return 0;

	ret = arm_bL_ops->init_opp_table(cpumask);
	if (ret) {
		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto free_opp_table;
	}

	clk[cluster] = clk_get(cpu_dev, NULL);
	if (!IS_ERR(clk[cluster])) {
		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
				__func__, clk[cluster], freq_table[cluster],
				cluster);
		return 0;
	}

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
			__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

free_opp_table:
	if (arm_bL_ops->free_opp_table)
		arm_bL_ops->free_opp_table(cpumask);
out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
			cluster);
	return ret;
}

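/*
 * Refcounted setup: only the first user of a cluster does the real work.
 * For the virtual cluster, both physical clusters are initialised, their
 * tables merged, and the A15/A7 switch thresholds derived.
 */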
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
					  const struct cpumask *cpumask)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming two clusters, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[0]);
	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));

	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
			__func__, cluster, clk_big_min, clk_little_max);

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);

		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		_put_cluster_clk_and_freq_table(cdev, cpumask);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}

/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		int cpu;

		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		for_each_cpu(cpu, policy->cpus)
			per_cpu(physical_cluster, cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
			policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
		return ret;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) =
					clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}

static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;
	int cur_cluster = cpu_to_cluster(policy->cpu);

	if (cur_cluster < MAX_CLUSTERS) {
		cpufreq_cooling_unregister(cdev[cur_cluster]);
		cdev[cur_cluster] = NULL;
	}

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}

static void bL_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int cur_cluster = cpu_to_cluster(policy->cpu);
	struct device_node *np;

	/* Do not register a cpu_cooling device if we are in IKS mode */
	if (cur_cluster >= MAX_CLUSTERS)
		return;

	np = of_node_get(cpu_dev->of_node);
	if (WARN_ON(!np))
		return;

	if (of_find_property(np, "#cooling-cells", NULL)) {
		u32 power_coefficient = 0;

		of_property_read_u32(np, "dynamic-power-coefficient",
				     &power_coefficient);

		cdev[cur_cluster] = of_cpufreq_power_cooling_register(np,
				policy->related_cpus, power_coefficient, NULL);
		if (IS_ERR(cdev[cur_cluster])) {
			dev_err(cpu_dev,
				"running cpufreq without cooling device: %ld\n",
				PTR_ERR(cdev[cur_cluster]));
			cdev[cur_cluster] = NULL;
		}
	}
	of_node_put(np);
}

static struct cpufreq_driver bL_cpufreq_driver = {
	.name			= "arm-big-little",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
					CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= bL_cpufreq_set_target,
	.get			= bL_cpufreq_get_rate,
	.init			= bL_cpufreq_init,
	.exit			= bL_cpufreq_exit,
	.ready			= bL_cpufreq_ready,
	.attr			= cpufreq_generic_attr,
};

#ifdef CONFIG_BL_SWITCHER
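/*
 * The driver is unregistered while the switcher changes state and
 * re-registered afterwards, so that frequency tables and policies are
 * rebuilt to match the new (physical or virtual) cluster layout.
 */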
static int bL_cpufreq_switcher_notifier(struct notifier_block *nfb,
					unsigned long action, void *_arg)
{
	pr_debug("%s: action: %ld\n", __func__, action);

	switch (action) {
	case BL_NOTIFY_PRE_ENABLE:
	case BL_NOTIFY_PRE_DISABLE:
		cpufreq_unregister_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_ENABLE:
		set_switching_enabled(true);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	case BL_NOTIFY_POST_DISABLE:
		set_switching_enabled(false);
		cpufreq_register_driver(&bL_cpufreq_driver);
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block bL_switcher_notifier = {
	.notifier_call = bL_cpufreq_switcher_notifier,
};

static int __bLs_register_notifier(void)
{
	return bL_switcher_register_notifier(&bL_switcher_notifier);
}

static int __bLs_unregister_notifier(void)
{
	return bL_switcher_unregister_notifier(&bL_switcher_notifier);
}
#else
static int __bLs_register_notifier(void) { return 0; }
static int __bLs_unregister_notifier(void) { return 0; }
#endif

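/* Register the generic driver on behalf of a platform-specific back-end */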
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
	int ret, i;

	if (arm_bL_ops) {
		pr_debug("%s: Already registered: %s, exiting\n", __func__,
				arm_bL_ops->name);
		return -EBUSY;
	}

	if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
		return -ENODEV;
	}

	arm_bL_ops = ops;

	set_switching_enabled(bL_switcher_get_enabled());

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&bL_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
				__func__, ops->name, ret);
		arm_bL_ops = NULL;
	} else {
		ret = __bLs_register_notifier();
		if (ret) {
			cpufreq_unregister_driver(&bL_cpufreq_driver);
			arm_bL_ops = NULL;
		} else {
			pr_info("%s: Registered platform driver: %s\n",
					__func__, ops->name);
		}
	}

	bL_switcher_put_enabled();
	return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);

void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
{
	if (arm_bL_ops != ops) {
		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
				__func__, arm_bL_ops->name);
		return;
	}

	bL_switcher_get_enabled();
	__bLs_unregister_notifier();
	cpufreq_unregister_driver(&bL_cpufreq_driver);
	bL_switcher_put_enabled();
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
			arm_bL_ops->name);
	arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);
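
/*
 * Usage sketch (not part of this driver, a hypothetical back-end): a
 * platform driver supplies a struct cpufreq_arm_bL_ops, of which only
 * .name and .init_opp_table are mandatory (see bL_cpufreq_register()),
 * and registers it from its own probe path. The "my_*" names below are
 * illustrative; the DT back-end in arm_big_little_dt.c follows this
 * pattern, using the OPP library to populate the frequency tables.
 *
 *	static int my_init_opp_table(const struct cpumask *cpumask)
 *	{
 *		return dev_pm_opp_of_cpumask_add_table(cpumask);
 *	}
 *
 *	static struct cpufreq_arm_bL_ops my_bL_ops = {
 *		.name		= "my-bl-cpufreq",
 *		.init_opp_table	= my_init_opp_table,
 *	};
 *
 *	ret = bL_cpufreq_register(&my_bL_ops);
 */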

MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver");
MODULE_LICENSE("GPL v2");