/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
16
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)

/*
 * Per-CPU state: the jiffies64 deadline before which no further message is
 * printed (rate limiting), and the total number of throttle events seen,
 * which is exported via the "thermal_throttle" sysfs group below.
 */
static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);

/*
 * Set to 1 by intel_init_thermal() once the thermal vector is installed;
 * thermal_throttle_init_device() checks it before creating sysfs files.
 */
static atomic_t therm_throt_en		= ATOMIC_INIT(0);
#ifdef CONFIG_SYSFS
I
Ingo Molnar 已提交
43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62
#define define_therm_throt_sysdev_one_ro(_name)				\
	static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)

#define define_therm_throt_sysdev_show_func(name)			\
static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev,	\
					struct sysdev_attribute *attr,	\
					      char *buf)		\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu))						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_throttle_##name, cpu));	\
	else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
63 64 65 66 67 68 69 70 71 72 73
}

define_therm_throt_sysdev_show_func(count);
define_therm_throt_sysdev_one_ro(count);

static struct attribute *thermal_throttle_attrs[] = {
	&attr_count.attr,
	NULL
};

static struct attribute_group thermal_throttle_attr_group = {
I
Ingo Molnar 已提交
74 75
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
76 77
};
#endif /* CONFIG_SYSFS */
/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @curr: Whether the condition is current or not (boolean), since the
 *        thermal interrupt normally gets called both when the thermal
 *        event begins and once the event has ended.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(int curr)
{
	unsigned int cpu = smp_processor_id();
	__u64 tmp_jiffs = get_jiffies_64();

	/* Count every throttle-active event in this CPU's sysfs counter. */
	if (curr)
		__get_cpu_var(thermal_throttle_count)++;

	/* Rate limit: stay quiet until CHECK_INTERVAL has elapsed. */
	if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
		return 0;

	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;

	/* if we just entered the thermal event */
	if (curr) {
		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
		       "cpu clock throttled (total events = %lu)\n", cpu,
		       __get_cpu_var(thermal_throttle_count));

		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
	}

	return 1;
}
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
124
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
125
{
I
Ingo Molnar 已提交
126 127
	return sysfs_create_group(&sys_dev->kobj,
				  &thermal_throttle_attr_group);
128 129
}

130
static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
131
{
132
	sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
133 134
}

/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/*
 * Get notified when a cpu comes on/off. Be hotplug friendly.
 * Creates the sysfs group when a CPU is being brought up, and removes
 * it when bring-up is cancelled or the CPU goes away. The _FROZEN
 * variants cover suspend/resume transitions.
 */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;
	int err = 0;

	sys_dev = get_cpu_sysdev(cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Serialized against thermal_throttle_init_device(). */
		mutex_lock(&therm_cpu_lock);
		err = thermal_throttle_add_dev(sys_dev);
		mutex_unlock(&therm_cpu_lock);
		WARN_ON(err);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mutex_lock(&therm_cpu_lock);
		thermal_throttle_remove_dev(sys_dev);
		mutex_unlock(&therm_cpu_lock);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
	.notifier_call = thermal_throttle_cpu_callback,
};

static __init int thermal_throttle_init_device(void)
{
	unsigned int cpu = 0;
178
	int err;
179 180 181 182 183 184 185 186 187 188

	if (!atomic_read(&therm_throt_en))
		return 0;

	register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&therm_cpu_lock);
#endif
	/* connect live CPUs to sysfs */
189 190 191 192
	for_each_online_cpu(cpu) {
		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
		WARN_ON(err);
	}
193 194 195 196 197 198 199
#ifdef CONFIG_HOTPLUG_CPU
	mutex_unlock(&therm_cpu_lock);
#endif

	return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
	__u64 msr_val;

	/* PROCHOT in IA32_THERM_STATUS tells whether throttling is active now. */
	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
	/* Log to mcelog only when the rate limiter lets the event through. */
	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
		mce_log_therm_throt_event(msr_val);
}

/*
 * Fallback handler: fires if the thermal vector is raised before
 * intel_init_thermal() installed the real handler.
 */
static void unexpected_thermal_interrupt(void)
{
	unsigned int cpu = smp_processor_id();

	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n", cpu);
	add_taint(TAINT_MACHINE_CHECK);
}

/* Dispatch target of smp_thermal_interrupt(); intel_init_thermal()
 * repoints this at intel_thermal_interrupt(). */
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

/*
 * Low-level entry point for the thermal LVT vector: account the IRQ,
 * dispatch to the installed handler, then acknowledge the local APIC.
 */
asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
	exit_idle();
	irq_enter();
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}

/*
 * Per-CPU setup of Intel thermal monitoring: program the local APIC
 * thermal LVT, enable TM1 (and report TM2 if active), install the
 * interrupt handler, and finally flag therm_throt_en so the sysfs
 * interface gets created.
 */
void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	/* Thermal monitoring depends on ACPI and clock modulation*/
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return;

	/*
	 * First check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
		tm2 = 1;

	/* Check whether a vector already exists */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
		       cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	/* Enable interrupts on both the low and high temperature thresholds. */
	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr(MSR_IA32_THERM_INTERRUPT,
		l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	/* Point the dispatch hook at the Intel handler before unmasking. */
	smp_thermal_vector = intel_thermal_interrupt;

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector: */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
	       cpu, tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}