/*
 *  cpuidle-pseries - idle state cpuidle driver.
 *  Adapted from drivers/idle/intel_idle.c and
 *  drivers/acpi/processor_idle.c
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>

static struct cpuidle_driver pseries_idle_driver = {
	.name             = "pseries_idle",
	.owner            = THIS_MODULE,
};

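/*
 * At most two idle states are exposed: snooze (polling) and CEDE.
 * max_idle_state caps how many of them the cpuidle core may use.
 */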
#define MAX_IDLE_STATE_COUNT	2

static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_state *cpuidle_state_table;

static inline void idle_loop_prolog(unsigned long *in_purr)
{
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}

static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;
}

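/*
 * snooze_loop() - poll at low SMT priority until the scheduler has
 * work for this CPU, giving sibling threads more dispatch cycles.
 */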
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}

static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked, and check
	 * that no interrupt has occurred while we were soft-disabled.
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}

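/*
 * dedicated_cede_loop() - cede a dedicated-processor thread to the
 * hypervisor, flagging it as donated so its cycles can be reused.
 */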
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}

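/*
 * shared_cede_loop() - on a shared-processor LPAR, yield the virtual
 * processor to the hypervisor until an interrupt or an H_PROD.
 */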
static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor.  We return if an
	 * external interrupt occurs (external interrupts are driven
	 * before we return here) or if another processor prods us.
	 * When we return here, external interrupts are enabled.
	 */
	check_and_cede_processor();

	idle_loop_epilog(in_purr);

	return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};

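/*
 * update_smt_snooze_delay() - fold the smt_snooze_delay sysfs value
 * into the CEDE state: a negative value disables CEDE on that cpu,
 * otherwise it becomes the state's target residency.
 */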
void update_smt_snooze_delay(int cpu, int residency)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (cpuidle_state_table != dedicated_states)
		return;

	if (residency < 0) {
		/* Disable the CEDE state on that cpu */
		if (dev)
			dev->states_usage[1].disable = 1;
	} else if (drv)
		drv->states[1].target_residency = residency;
}

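/*
 * pseries_cpuidle_add_cpu_notifier() - hotplug callback that enables
 * or disables the per-cpu cpuidle device as CPUs come and go.
 */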
static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
				per_cpu(cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = pseries_cpuidle_add_cpu_notifier,
};

/*
 * pseries_cpuidle_driver_init()
 * Copy the enabled entries of cpuidle_state_table into the driver,
 * honouring max_idle_state.
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {

		if (idle_state > max_idle_state)
			break;

		/* is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (max_idle_state == 0) {
		printk(KERN_DEBUG "pseries processor idle disabled.\n");
		return -EPERM;
	}

	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		if (lppaca_shared_proc(get_lppaca()))
			cpuidle_state_table = shared_states;
		else
			cpuidle_state_table = dedicated_states;
	} else
		return -ENODEV;

	return 0;
}

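/*
 * pseries_processor_idle_init() - probe the partition type, populate
 * the driver's state table, then register the driver and the hotplug
 * notifier.
 */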
static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register(&pseries_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");
	return 0;
}

device_initcall(pseries_processor_idle_init);