/*
 * spu aware cpufreq governor for the cell processor
 *
 * © Copyright IBM Corporation 2006-2008
 *
 * Author: Christian Krafft <krafft@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/cpufreq.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/machdep.h>
#include <asm/spu.h>

#define POLL_TIME	100000		/* in µs */
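/* decay weight for the CALC_LOAD() smoothing in calc_freq(): FIXED_1 / e */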
#define EXP		753		/* exp(-1) in fixed-point */

struct spu_gov_info_struct {
	unsigned long busy_spus;	/* fixed-point */
	struct cpufreq_policy *policy;
	struct delayed_work work;
	unsigned int poll_int;		/* µs */
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);

static struct workqueue_struct *kspugov_wq;

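/*
 * Derive a target frequency from SPU utilisation: sample the number of
 * busy SPUs on this CPU's node, smooth it with the load-average style
 * CALC_LOAD() fixed-point math, and scale the policy's maximum frequency
 * by the resulting fraction of FIXED_1.
 */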
static int calc_freq(struct spu_gov_info_struct *info)
{
	int cpu;
	int busy_spus;

	cpu = info->policy->cpu;
	busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);

	CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
	pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
			cpu, busy_spus, info->busy_spus);

	return info->policy->max * info->busy_spus / FIXED_1;
}

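/*
 * Periodic worker: set a new target frequency for the attached policy
 * and re-arm itself after poll_int microseconds on the policy's CPU.
 */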
static void spu_gov_work(struct work_struct *work)
{
	struct spu_gov_info_struct *info;
	int delay;
	unsigned long target_freq;

	info = container_of(work, struct spu_gov_info_struct, work.work);

	/* after cancel_delayed_work_sync we unset info->policy */
	BUG_ON(info->policy == NULL);

	target_freq = calc_freq(info);
	__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);

	delay = usecs_to_jiffies(info->poll_int);
	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
}

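/* arm the deferrable worker for the first time on the policy's CPU */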
static void spu_gov_init_work(struct spu_gov_info_struct *info)
{
	int delay = usecs_to_jiffies(info->poll_int);
	INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
	queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
}

static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
{
	cancel_delayed_work_sync(&info->work);
}

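/*
 * Governor callback: CPUFREQ_GOV_START attaches the policy to the
 * per-cpu governor state of all affected CPUs and starts polling;
 * CPUFREQ_GOV_STOP cancels the worker and detaches the policy again.
 */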
static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct spu_gov_info_struct *info, *affected_info;
	int i;
	int ret = 0;

	info = &per_cpu(spu_gov_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(cpu)) {
			printk(KERN_ERR "cpu %d is not online\n", cpu);
			ret = -EINVAL;
			break;
		}

		if (!policy->cur) {
			printk(KERN_ERR "no cpu specified in policy\n");
			ret = -EINVAL;
			break;
		}

		/* initialize spu_gov_info for all affected cpus */
		for_each_cpu(i, policy->cpus) {
			affected_info = &per_cpu(spu_gov_info, i);
			affected_info->policy = policy;
		}

		info->poll_int = POLL_TIME;

		/* setup timer */
		spu_gov_init_work(info);

		break;

	case CPUFREQ_GOV_STOP:
		/* cancel timer */
		spu_gov_cancel_work(info);

		/* clean spu_gov_info for all affected cpus */
		for_each_cpu(i, policy->cpus) {
			info = &per_cpu(spu_gov_info, i);
			info->policy = NULL;
		}

		break;
	}

	return ret;
}

static struct cpufreq_governor spu_governor = {
	.name = "spudemand",
	.governor = spu_gov_govern,
	.owner = THIS_MODULE,
};

/*
 * module init and destroy
 */

static int __init spu_gov_init(void)
{
	int ret;

	kspugov_wq = create_workqueue("kspugov");
	if (!kspugov_wq) {
		printk(KERN_ERR "creation of kspugov failed\n");
		ret = -EFAULT;
		goto out;
	}

	ret = cpufreq_register_governor(&spu_governor);
	if (ret) {
		printk(KERN_ERR "registration of governor failed\n");
		destroy_workqueue(kspugov_wq);
		goto out;
	}
out:
	return ret;
}

static void __exit spu_gov_exit(void)
{
	cpufreq_unregister_governor(&spu_governor);
	destroy_workqueue(kspugov_wq);
}


module_init(spu_gov_init);
module_exit(spu_gov_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");