/*
 * P4 specific Machine Check Exception Reporting
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/processor.h> 
#include <asm/system.h>
#include <asm/msr.h>
#include <asm/apic.h>

#include "mce.h"

/* Extended machine-check state (a snapshot of the general registers,
 * EFLAGS and EIP at the time of the exception), as supported by the
 * P4/Xeon family */
struct intel_mce_extended_msrs {
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u32 esi;
	u32 edi;
	u32 ebp;
	u32 esp;
	u32 eflags;
	u32 eip;
	/* u32 *reserved[]; */
};

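/* Number of extended MSRs advertised in MCG_CAP (MCG_EXT_CNT); stays
 * zero until intel_p4_mcheck_init() detects support */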
static int mce_num_extended_msrs = 0;


#ifdef CONFIG_X86_MCE_P4THERMAL
static void unexpected_thermal_interrupt(struct pt_regs *regs)
{
	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
			smp_processor_id());
	add_taint(TAINT_MACHINE_CHECK);
}

/* P4/Xeon Thermal transition interrupt handler */
static void intel_thermal_interrupt(struct pt_regs *regs)
{
	u32 l, h;
	unsigned int cpu = smp_processor_id();
	static unsigned long next[NR_CPUS];

	ack_APIC_irq();

	/* Rate-limit messages to at most one every five seconds per CPU */
	if (time_after(next[cpu], jiffies))
		return;

	next[cpu] = jiffies + HZ*5;
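	/* THERM_STATUS bit 0 is the live thermal trip flag: set while the
	 * CPU is at or above the trip temperature and the TCC is active */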
	rdmsr(MSR_IA32_THERM_STATUS, l, h);
	if (l & 0x1) {
		printk(KERN_EMERG "CPU%d: Temperature above threshold\n", cpu);
		printk(KERN_EMERG "CPU%d: Running in modulated clock mode\n",
				cpu);
		add_taint(TAINT_MACHINE_CHECK);
	} else {
		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
	}
}

/* Thermal interrupt handler for this CPU setup; intel_init_thermal()
 * replaces the default once the thermal vector is configured */
static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;

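/* Thermal interrupt entry point, reached via THERMAL_APIC_VECTOR */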
fastcall void smp_thermal_interrupt(struct pt_regs *regs)
{
	irq_enter();
	vendor_thermal_interrupt(regs);
	irq_exit();
}

/* P4/Xeon Thermal regulation detect and init */
static void __devinit intel_init_thermal(struct cpuinfo_x86 *c)
{
	u32 l, h;
	unsigned int cpu = smp_processor_id();

	/* Thermal monitoring */
	if (!cpu_has(c, X86_FEATURE_ACPI))
		return;	/* -ENODEV */

	/* Clock modulation */
	if (!cpu_has(c, X86_FEATURE_ACC))
		return;	/* -ENODEV */

	/* First check whether it is already enabled, in which case there
	 * may be SMM code handling it; we must not install a handler since
	 * the interrupt may already be delivered via SMI. -zwanem.
	 */
	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
	h = apic_read(APIC_LVTTHMR);
	/* TM1 already enabled (MISC_ENABLE bit 3) and the thermal LVT
	 * routed to SMI: the BIOS owns thermal handling */
	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
				cpu);
		return; /* -EBUSY */
	}

	/* Check whether a vector is already installed, perhaps temporarily masked */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already "
				"installed\n",
			cpu, (h & APIC_VECTOR_MASK));
		return; /* -EBUSY */
	}

	/* The temperature transition interrupt handler setup */
	h = THERMAL_APIC_VECTOR;		/* our delivery vector */
	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
	apic_write_around(APIC_LVTTHMR, h);

	/* Enable interrupts on both the high- and low-temperature threshold
	 * transitions (THERM_INTERRUPT bits 0 and 1) */
	rdmsr (MSR_IA32_THERM_INTERRUPT, l, h);
	wrmsr (MSR_IA32_THERM_INTERRUPT, l | 0x03, h);

	/* Ok, we're good to go: install the handler... */
	vendor_thermal_interrupt = intel_thermal_interrupt;

	/* ...enable the thermal monitor via MISC_ENABLE bit 3 (TM1)... */
	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
	wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h);

	/* ...and finally unmask the thermal vector */
	l = apic_read (APIC_LVTTHMR);
	apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
	printk (KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
}
#endif /* CONFIG_X86_MCE_P4THERMAL */


/* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
static inline int intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
{
	u32 h;

	if (mce_num_extended_msrs == 0)
		return 0;

	rdmsr (MSR_IA32_MCG_EAX, r->eax, h);
	rdmsr (MSR_IA32_MCG_EBX, r->ebx, h);
	rdmsr (MSR_IA32_MCG_ECX, r->ecx, h);
	rdmsr (MSR_IA32_MCG_EDX, r->edx, h);
	rdmsr (MSR_IA32_MCG_ESI, r->esi, h);
	rdmsr (MSR_IA32_MCG_EDI, r->edi, h);
	rdmsr (MSR_IA32_MCG_EBP, r->ebp, h);
	rdmsr (MSR_IA32_MCG_ESP, r->esp, h);
	rdmsr (MSR_IA32_MCG_EFLAGS, r->eflags, h);
	rdmsr (MSR_IA32_MCG_EIP, r->eip, h);

	/* can we rely on kmalloc to do a dynamic
	 * allocation for the reserved registers?
	 */
	return mce_num_extended_msrs;
}

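/* Machine-check exception handler, installed as machine_check_vector
 * by intel_p4_mcheck_init() below */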
static fastcall void intel_machine_check(struct pt_regs * regs, long error_code)
{
	int recover = 1;
	u32 alow, ahigh, high, low;
	u32 mcgstl, mcgsth;
	int i;
	struct intel_mce_extended_msrs dbg;

	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
	if (mcgstl & (1<<0))	/* RIPV: restart IP valid, so recoverable */
		recover = 0;

	printk (KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
		smp_processor_id(), mcgsth, mcgstl);

	if (intel_get_extended_msrs(&dbg)) {
		printk (KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n",
			smp_processor_id(), dbg.eip, dbg.eflags);
		printk (KERN_DEBUG "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n",
			dbg.eax, dbg.ebx, dbg.ecx, dbg.edx);
		printk (KERN_DEBUG "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
			dbg.esi, dbg.edi, dbg.ebp, dbg.esp);
	}

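	/*
	 * Scan every reporting bank.  In the high dword of IA32_MCi_STATUS:
	 * bit 31 = VAL (entry valid), bit 29 = UC (uncorrected error),
	 * bit 27 = MISCV (MCi_MISC valid), bit 26 = ADDRV (MCi_ADDR valid),
	 * bit 25 = PCC (processor context corrupt).  An uncorrected error
	 * makes the exception unrecoverable (recover |= 1); context
	 * corruption forces a panic (recover |= 2).
	 */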
	for (i=0; i<nr_mce_banks; i++) {
		rdmsr (MSR_IA32_MC0_STATUS+i*4, low, high);
		if (high & (1<<31)) {
			if (high & (1<<29))
				recover |= 1;
			if (high & (1<<25))
				recover |= 2;
			printk (KERN_EMERG "Bank %d: %08x%08x", i, high, low);
			high &= ~(1<<31);
			if (high & (1<<27)) {
				rdmsr (MSR_IA32_MC0_MISC+i*4, alow, ahigh);
				printk ("[%08x%08x]", ahigh, alow);
			}
			if (high & (1<<26)) {
				rdmsr (MSR_IA32_MC0_ADDR+i*4, alow, ahigh);
				printk (" at %08x%08x", ahigh, alow);
			}
			printk ("\n");
		}
	}

	if (recover & 2)
		panic ("CPU context corrupt");
	if (recover & 1)
		panic ("Unable to continue");

	printk(KERN_EMERG "Attempting to continue.\n");
	/*
	 * Do not clear MSR_IA32_MCi_STATUS if the error was not
	 * recoverable/continuable; that lets the BIOS inspect the MSRs
	 * if the OS could not log the error.  We only reach this point
	 * when the error was recoverable, so clearing is safe.
	 */
	for (i=0; i<nr_mce_banks; i++) {
		u32 msr;
		msr = MSR_IA32_MC0_STATUS+i*4;
		rdmsr (msr, low, high);
		if (high & (1<<31)) {
			/* Clear it */
			wrmsr(msr, 0UL, 0UL);
			/* Serialize */
			wmb();
			add_taint(TAINT_MACHINE_CHECK);
		}
	}
	/* Clear MCIP (machine check in progress) to re-arm the exception */
	mcgstl &= ~(1<<2);
	wrmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
}


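/* Probe and enable the Intel machine-check architecture: install the
 * exception handler, enable every reporting bank, set CR4.MCE, and
 * detect the P4/Xeon extended MCE MSRs and the thermal monitor */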
void __devinit intel_p4_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;
	
	machine_check_vector = intel_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* MCG_CTL_P: control register present? */
		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;	/* MCG_CAP bits 7:0: bank count */

	/* Enable reporting of all errors in each bank and clear stale status */
	for (i=0; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());

	/* Check for P4/Xeon extended MCE MSRs */
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<9)) {	/* MCG_EXT_P: extended MSRs present */
		mce_num_extended_msrs = (l >> 16) & 0xff;	/* MCG_EXT_CNT */
		printk (KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
				" available\n",
			smp_processor_id(), mce_num_extended_msrs);

#ifdef CONFIG_X86_MCE_P4THERMAL
		/* Check for P4/Xeon Thermal monitor */
		intel_init_thermal(c);
#endif
	}
}