/*
 * This only handles 32bit MTRR on 32bit hosts. This is strictly wrong
 * because MTRRs can span up to 40 bits (36bits on most modern x86)
 */
#define DEBUG

#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/processor-flags.h>
#include <asm/cpufeature.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/pat.h>

#include "mtrr.h"

struct fixed_range_block {
	int base_msr;		/* start address of an MTRR block */
	int ranges;		/* number of MTRRs in this block  */
};

static struct fixed_range_block fixed_range_blocks[] = {
	{ MSR_MTRRfix64K_00000, 1 }, /* one   64k MTRR  */
	{ MSR_MTRRfix16K_80000, 2 }, /* two   16k MTRRs */
	{ MSR_MTRRfix4K_C0000,  8 }, /* eight  4k MTRRs */
	{}
};

static unsigned long smp_changes_mask;
static int mtrr_state_set;
u64 mtrr_tom2;

struct mtrr_state_type mtrr_state;
EXPORT_SYMBOL_GPL(mtrr_state);

/*
 * BIOS is expected to clear MtrrFixDramModEn bit, see for example
 * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
 * Opteron Processors" (26094 Rev. 3.30 February 2006), section
 * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
 * to 1 during BIOS initialization of the fixed MTRRs, then cleared to
 * 0 for operation."
 */
static inline void k8_check_syscfg_dram_mod_en(void)
{
	u32 lo, hi;

	if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
	      (boot_cpu_data.x86 >= 0x0f)))
		return;

	rdmsr(MSR_K8_SYSCFG, lo, hi);
	if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
		printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
		       " not cleared by BIOS, clearing this bit\n",
		       smp_processor_id());
		lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
		mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
	}
}

/* Get the size of contiguous MTRR range */
static u64 get_mtrr_size(u64 mask)
{
	u64 size;

	mask >>= PAGE_SHIFT;
	mask |= size_or_mask;
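	/*
	 * The variable-range mask has contiguous 1s in its top bits and
	 * size_or_mask fills in everything above the supported physical
	 * address width, so at this point mask looks like 11...1100...00.
	 * Negating it in two's complement leaves just the low run of zeroes,
	 * i.e. the number of pages covered (e.g. -0xffffffffffffff00 == 0x100).
	 */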
	size = -mask;
	size <<= PAGE_SHIFT;
	return size;
}

/*
 * Check and return the effective type for MTRR-MTRR type overlap.
 * Returns 1 if the effective type is UNCACHEABLE, else returns 0
 */
static int check_type_overlap(u8 *prev, u8 *curr)
{
	if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) ||
	    (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) {
		*prev = MTRR_TYPE_WRTHROUGH;
		*curr = MTRR_TYPE_WRTHROUGH;
	}

	if (*prev != *curr) {
		*prev = MTRR_TYPE_UNCACHABLE;
		*curr = MTRR_TYPE_UNCACHABLE;
		return 1;
	}

	return 0;
}

/*
 * Error/Semi-error returns:
 * MTRR_TYPE_INVALID - when MTRR is not enabled
 * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
 *		corresponds only to [start:*partial_end].
 *		Caller has to lookup again for [*partial_end:end].
 */
static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;

	*repeat = 0;
	if (!mtrr_state_set)
		return MTRR_TYPE_INVALID;

	if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
		return MTRR_TYPE_INVALID;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
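	/*
	 * fixed_ranges[] holds one mtrr_type byte per fixed sub-range:
	 * entries 0-7 cover 0x00000-0x7FFFF at 64K granularity, entries
	 * 8-23 cover 0x80000-0xBFFFF at 16K granularity and entries 24-87
	 * cover 0xC0000-0xFFFFF at 4K granularity, which is what the index
	 * arithmetic below relies on.
	 */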
	if ((start < 0x100000) &&
	    (mtrr_state.have_fixed) &&
	    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state.fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state.fixed_ranges[idx];
		} else {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state.fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick type
	 * as per MTRR precedence
	 */
	prev_match = MTRR_TYPE_INVALID;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state, inclusive;
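		/* Only ranges with the valid bit (bit 11 of MTRRphysMask) set take part. */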

		if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state.var_ranges[i].base_hi) << 32) +
		       (mtrr_state.var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state.var_ranges[i].mask_hi) << 32) +
		       (mtrr_state.var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		inclusive = ((start < base) && (end > base));

		if ((start_state != end_state) || inclusive) {
			/*
			 * We have start:end spanning across an MTRR.
			 * We split the region into either
			 *
			 * - start_state:1
			 * (start:mtrr_end)(mtrr_end:end)
			 * - end_state:1
			 * (start:mtrr_start)(mtrr_start:end)
			 * - inclusive:1
			 * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
			 *
			 * depending on kind of overlap.
			 *
			 * Return the type of the first region and a pointer
			 * to the start of next region so that caller will be
			 * advised to lookup again after having adjusted start
			 * and end.
			 *
			 * Note: This way we handle multiple overlaps as well.
			 */
			if (start_state)
				*partial_end = base + get_mtrr_size(mask);
			else
				*partial_end = base;

			if (unlikely(*partial_end <= start)) {
				WARN_ON(1);
				*partial_end = start + PAGE_SIZE;
			}

			end = *partial_end - 1; /* end is inclusive */
			*repeat = 1;
		}

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
		if (prev_match == MTRR_TYPE_INVALID) {
			prev_match = curr_match;
			continue;
		}

		if (check_type_overlap(&prev_match, &curr_match))
			return curr_match;
	}
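	/*
	 * AMD TOP_MEM2: when enabled, DRAM between 4GB and the TOM2 boundary
	 * is write-back regardless of the variable ranges.
	 */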

	if (mtrr_tom2) {
		if (start >= (1ULL<<32) && (end < mtrr_tom2))
			return MTRR_TYPE_WRBACK;
	}

	if (prev_match != MTRR_TYPE_INVALID)
		return prev_match;

	return mtrr_state.def_type;
}

/*
 * Returns the effective MTRR type for the region
 * Error return:
 * MTRR_TYPE_INVALID - when MTRR is not enabled
 */
u8 mtrr_type_lookup(u64 start, u64 end)
{
	u8 type, prev_type;
	int repeat;
	u64 partial_end;

	type = __mtrr_type_lookup(start, end, &partial_end, &repeat);

	/*
	 * Common path is with repeat = 0.
	 * However, we can have cases where [start:end] spans across some
	 * MTRR range. Do repeated lookups for that case here.
	 */
	while (repeat) {
		prev_type = type;
		start = partial_end;
		type = __mtrr_type_lookup(start, end, &partial_end, &repeat);

		if (check_type_overlap(&prev_type, &type))
			return type;
	}

	return type;
}

/* Get the MSR pair relating to a var range */
static void
get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
{
	rdmsr(MTRRphysBase_MSR(index), vr->base_lo, vr->base_hi);
	rdmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
}

/* Fill the MSR pair relating to a var range */
void fill_mtrr_var_range(unsigned int index,
		u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi)
{
	struct mtrr_var_range *vr;

	vr = mtrr_state.var_ranges;

	vr[index].base_lo = base_lo;
	vr[index].base_hi = base_hi;
	vr[index].mask_lo = mask_lo;
	vr[index].mask_hi = mask_hi;
}

static void get_fixed_ranges(mtrr_type *frs)
{
	unsigned int *p = (unsigned int *)frs;
	int i;

	k8_check_syscfg_dram_mod_en();
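	/* Read each fixed-range MSR as a lo/hi pair of u32s; every MSR packs eight type bytes. */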

	rdmsr(MSR_MTRRfix64K_00000, p[0], p[1]);

	for (i = 0; i < 2; i++)
		rdmsr(MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
	for (i = 0; i < 8; i++)
		rdmsr(MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}

void mtrr_save_fixed_ranges(void *info)
{
	if (cpu_has_mtrr)
		get_fixed_ranges(mtrr_state.fixed_ranges);
}

static unsigned __initdata last_fixed_start;
static unsigned __initdata last_fixed_end;
static mtrr_type __initdata last_fixed_type;

static void __init print_fixed_last(void)
{
	if (!last_fixed_end)
		return;

	pr_debug("  %05X-%05X %s\n", last_fixed_start,
		 last_fixed_end - 1, mtrr_attrib_to_str(last_fixed_type));

	last_fixed_end = 0;
}

static void __init update_fixed_last(unsigned base, unsigned end,
				     mtrr_type type)
{
	last_fixed_start = base;
	last_fixed_end = end;
	last_fixed_type = type;
}
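/*
 * Print one block of eight fixed-range entries, merging adjacent entries
 * of the same type so the log shows contiguous ranges.
 */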

static void __init
print_fixed(unsigned base, unsigned step, const mtrr_type *types)
{
	unsigned i;

	for (i = 0; i < 8; ++i, ++types, base += step) {
		if (last_fixed_end == 0) {
			update_fixed_last(base, base + step, *types);
			continue;
		}
		if (last_fixed_end == base && last_fixed_type == *types) {
			last_fixed_end = base + step;
			continue;
		}
		/* new segments: gap or different type */
		print_fixed_last();
		update_fixed_last(base, base + step, *types);
	}
}

static void prepare_set(void);
static void post_set(void);

static void __init print_mtrr_state(void)
{
	unsigned int i;
	int high_width;

	pr_debug("MTRR default type: %s\n",
		 mtrr_attrib_to_str(mtrr_state.def_type));
	if (mtrr_state.have_fixed) {
		pr_debug("MTRR fixed ranges %sabled:\n",
			((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
			 (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
			 "en" : "dis");
		print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
		for (i = 0; i < 2; ++i)
			print_fixed(0x80000 + i * 0x20000, 0x04000,
				    mtrr_state.fixed_ranges + (i + 1) * 8);
		for (i = 0; i < 8; ++i)
			print_fixed(0xC0000 + i * 0x08000, 0x01000,
				    mtrr_state.fixed_ranges + (i + 3) * 8);

		/* tail */
		print_fixed_last();
	}
	pr_debug("MTRR variable ranges %sabled:\n",
		 mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
	high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;

	for (i = 0; i < num_var_ranges; ++i) {
		if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
			pr_debug("  %u base %0*X%05X000 mask %0*X%05X000 %s\n",
				 i,
				 high_width,
				 mtrr_state.var_ranges[i].base_hi,
				 mtrr_state.var_ranges[i].base_lo >> 12,
				 high_width,
				 mtrr_state.var_ranges[i].mask_hi,
				 mtrr_state.var_ranges[i].mask_lo >> 12,
				 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
		else
			pr_debug("  %u disabled\n", i);
	}
	if (mtrr_tom2)
		pr_debug("TOM2: %016llx aka %lldM\n", mtrr_tom2, mtrr_tom2>>20);
}

/* Grab all of the MTRR state for this CPU into *state */
void __init get_mtrr_state(void)
{
	struct mtrr_var_range *vrs;
	unsigned long flags;
	unsigned lo, dummy;
	unsigned int i;

	vrs = mtrr_state.var_ranges;

	rdmsr(MSR_MTRRcap, lo, dummy);
	mtrr_state.have_fixed = (lo >> 8) & 1;

	for (i = 0; i < num_var_ranges; i++)
		get_mtrr_var_range(i, &vrs[i]);
	if (mtrr_state.have_fixed)
		get_fixed_ranges(mtrr_state.fixed_ranges);

	rdmsr(MSR_MTRRdefType, lo, dummy);
	mtrr_state.def_type = (lo & 0xff);
	mtrr_state.enabled = (lo & 0xc00) >> 10;

	if (amd_special_default_mtrr()) {
		unsigned low, high;

		/* TOP_MEM2 */
		rdmsr(MSR_K8_TOP_MEM2, low, high);
		mtrr_tom2 = high;
		mtrr_tom2 <<= 32;
		mtrr_tom2 |= low;
		mtrr_tom2 &= 0xffffff800000ULL;
	}

	print_mtrr_state();

	mtrr_state_set = 1;

	/* PAT setup for BP. We need to go through sync steps here */
	local_irq_save(flags);
	prepare_set();

	pat_init();

	post_set();
	local_irq_restore(flags);
}

/* Some BIOS's are messed up and don't set all MTRRs the same! */
void __init mtrr_state_warn(void)
{
	unsigned long mask = smp_changes_mask;

	if (!mask)
		return;
	if (mask & MTRR_CHANGE_MASK_FIXED)
		pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_VARIABLE)
		pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
	if (mask & MTRR_CHANGE_MASK_DEFTYPE)
		pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");

	printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
	printk(KERN_INFO "mtrr: corrected configuration.\n");
}

/*
 * Doesn't attempt to pass an error out to MTRR users
 * because it's quite complicated in some cases and probably not
 * worth it because the best error handling is to ignore it.
 */
void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
{
	if (wrmsr_safe(msr, a, b) < 0) {
		printk(KERN_ERR
			"MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
			smp_processor_id(), msr, a, b);
	}
}

/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it
 *		     differs from the value it should have
 * @msr: MSR address of the MTRR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
 */
static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
{
	unsigned lo, hi;

	rdmsr(msr, lo, hi);

	if (lo != msrwords[0] || hi != msrwords[1]) {
		mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
		*changed = true;
	}
}

/**
 * generic_get_free_region - Get a free MTRR.
 * @base: The starting (base) address of the region.
 * @size: The size (in bytes) of the region.
 * @replace_reg: mtrr index to be replaced; set to invalid value if none.
 *
 * Returns: The index of the region on success, else negative on error.
 */
int
generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
{
	unsigned long lbase, lsize;
	mtrr_type ltype;
	int i, max;

	max = num_var_ranges;
	if (replace_reg >= 0 && replace_reg < max)
		return replace_reg;

	for (i = 0; i < max; ++i) {
		mtrr_if->get(i, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return i;
	}

	return -ENOSPC;
}

static void generic_get_mtrr(unsigned int reg, unsigned long *base,
			     unsigned long *size, mtrr_type *type)
{
	u32 mask_lo, mask_hi, base_lo, base_hi;
	unsigned int hi;
	u64 tmp, mask;

	/*
	 * get_mtrr doesn't need to update mtrr_state, also it could be called
	 * from any cpu, so try to print it out directly.
	 */
	get_cpu();

	rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi);

	if ((mask_lo & 0x800) == 0) {
		/*  Invalid (i.e. free) range */
		*base = 0;
		*size = 0;
		*type = 0;
		goto out_put_cpu;
	}

	rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);

	/* Work out the shifted address mask: */
	tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
	mask = size_or_mask | tmp;

	/* Expand tmp with high bits to all 1s: */
	hi = fls64(tmp);
	if (hi > 0) {
		tmp |= ~((1ULL<<(hi - 1)) - 1);

		if (tmp != mask) {
			printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
			add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
			mask = tmp;
		}
	}

	/*
	 * This works correctly if size is a power of two, i.e. a
	 * contiguous range:
	 */
	*size = -mask;
	*base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
	*type = base_lo & 0xff;

out_put_cpu:
	put_cpu();
}

/**
 * set_fixed_ranges - checks & updates the fixed-range MTRRs if they
 *		      differ from the saved set
 * @frs: pointer to fixed-range MTRR values, saved by get_fixed_ranges()
 */
static int set_fixed_ranges(mtrr_type *frs)
{
	unsigned long long *saved = (unsigned long long *)frs;
	bool changed = false;
	int block = -1, range;

	k8_check_syscfg_dram_mod_en();
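	/*
	 * Each fixed-range MSR packs eight one-byte MTRR types, so walk the
	 * saved copy one 64-bit word (one MSR) at a time.
	 */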

	while (fixed_range_blocks[++block].ranges) {
		for (range = 0; range < fixed_range_blocks[block].ranges; range++)
			set_fixed_range(fixed_range_blocks[block].base_msr + range,
					&changed, (unsigned int *)saved++);
	}

	return changed;
}

/*
 * Set the MSR pair relating to a var range.
 * Returns true if changes are made.
 */
static bool set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
{
	unsigned int lo, hi;
	bool changed = false;
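	/*
	 * Only address bits the CPU implements are compared below;
	 * size_and_mask is used to ignore the bits above the supported
	 * physical address width.
	 */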

	rdmsr(MTRRphysBase_MSR(index), lo, hi);
	if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
	    || (vr->base_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {

L
609
		changed = true;
L

	rdmsr(MTRRphysMask_MSR(index), lo, hi);

	if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
	    || (vr->mask_hi & (size_and_mask >> (32 - PAGE_SHIFT))) !=
		(hi & (size_and_mask >> (32 - PAGE_SHIFT)))) {
		mtrr_wrmsr(MTRRphysMask_MSR(index), vr->mask_lo, vr->mask_hi);
		changed = true;
	}
	return changed;
}

static u32 deftype_lo, deftype_hi;

/**
 * set_mtrr_state - Set the MTRR state for this CPU.
 *
 * NOTE: The CPU must already be in a safe state for MTRR changes.
 * RETURNS: 0 if no changes made, else a mask indicating what was changed.
 */
static unsigned long set_mtrr_state(void)
{
	unsigned long change_mask = 0;
	unsigned int i;

	for (i = 0; i < num_var_ranges; i++) {
		if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
			change_mask |= MTRR_CHANGE_MASK_VARIABLE;
	}

	if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
		change_mask |= MTRR_CHANGE_MASK_FIXED;

	/*
	 * Set_mtrr_restore restores the old value of MTRRdefType,
	 * so to set it we fiddle with the saved value:
	 */
	if ((deftype_lo & 0xff) != mtrr_state.def_type
	    || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
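		/*
		 * MTRRdefType layout: bits 7:0 hold the default memory type,
		 * bit 10 the fixed-range enable and bit 11 the global MTRR
		 * enable; mtrr_state.enabled caches bits 11:10.
		 */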

		deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type |
			     (mtrr_state.enabled << 10);
		change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
	}

	return change_mask;
}


static unsigned long cr4;
static DEFINE_RAW_SPINLOCK(set_atomicity_lock);

/*
 * Since we are disabling the cache don't allow any interrupts,
 * they would run extremely slow and would only increase the pain.
 *
 * The caller must ensure that local interrupts are disabled and
 * are reenabled after post_set() has been called.
 */
static void prepare_set(void) __acquires(set_atomicity_lock)
{
	unsigned long cr0;

	/*
	 * Note that this is not ideal
	 * since the cache is only flushed/disabled for this CPU while the
	 * MTRRs are changed, but changing this requires more invasive
	 * changes to the way the kernel boots
	 */

	raw_spin_lock(&set_atomicity_lock);

	/* Enter the no-fill (CD=1, NW=0) cache mode and flush caches. */
	cr0 = read_cr0() | X86_CR0_CD;
	write_cr0(cr0);
	wbinvd();

	/* Save value of CR4 and clear Page Global Enable (bit 7) */
	if (cpu_has_pge) {
		cr4 = __read_cr4();
		__write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Save MTRR state */
	rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Disable MTRRs, and set the default type to uncached */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
	wbinvd();
}

static void post_set(void) __releases(set_atomicity_lock)
{
	/* Flush TLBs (no need to flush caches - they are disabled) */
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();

	/* Intel (P6) standard MTRRs */
	mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);

	/* Enable caches */
	write_cr0(read_cr0() & ~X86_CR0_CD);

	/* Restore value of CR4 */
	if (cpu_has_pge)
		__write_cr4(cr4);
	raw_spin_unlock(&set_atomicity_lock);
}

static void generic_set_all(void)
{
	unsigned long mask, count;
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the state */
	mask = set_mtrr_state();

	/* also set PAT */
	pat_init();

	post_set();
	local_irq_restore(flags);

	/* Use the atomic bitops to update the global mask */
	for (count = 0; count < sizeof mask * 8; ++count) {
		if (mask & 0x01)
			set_bit(count, &smp_changes_mask);
		mask >>= 1;
	}

}

/**
 * generic_set_mtrr - set variable MTRR register on the local CPU.
 *
 * @reg: The register to set.
 * @base: The base address of the region.
 * @size: The size of the region. If this is 0 the region is disabled.
 * @type: The type of the region.
 *
 * Returns nothing.
 */
static void generic_set_mtrr(unsigned int reg, unsigned long base,
			     unsigned long size, mtrr_type type)
{
	unsigned long flags;
	struct mtrr_var_range *vr;

	vr = &mtrr_state.var_ranges[reg];

	local_irq_save(flags);
	prepare_set();

	if (size == 0) {
		/*
		 * The invalid bit is kept in the mask, so we simply
		 * clear the relevant mask register to disable a range.
		 */
		mtrr_wrmsr(MTRRphysMask_MSR(reg), 0, 0);
		memset(vr, 0, sizeof(struct mtrr_var_range));
	} else {
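		/*
		 * Pack the page-granular base and size back into the MSR
		 * layout: the low byte of PhysBase carries the memory type
		 * and bit 11 of PhysMask (0x800) is the valid bit.
		 */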
		vr->base_lo = base << PAGE_SHIFT | type;
		vr->base_hi = (base & size_and_mask) >> (32 - PAGE_SHIFT);
		vr->mask_lo = -size << PAGE_SHIFT | 0x800;
		vr->mask_hi = (-size & size_and_mask) >> (32 - PAGE_SHIFT);

		mtrr_wrmsr(MTRRphysBase_MSR(reg), vr->base_lo, vr->base_hi);
		mtrr_wrmsr(MTRRphysMask_MSR(reg), vr->mask_lo, vr->mask_hi);
	}

	post_set();
	local_irq_restore(flags);
}

int generic_validate_add_page(unsigned long base, unsigned long size,
			      unsigned int type)
{
	unsigned long lbase, last;

	/*
	 * For Intel PPro stepping <= 7
	 * must be 4 MiB aligned and not touch 0x70000000 -> 0x7003FFFF
	 */
	if (is_cpu(INTEL) && boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask <= 7) {
		if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
			pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
			return -EINVAL;
		}
		if (!(base + size < 0x70000 || base > 0x7003F) &&
		    (type == MTRR_TYPE_WRCOMB
		     || type == MTRR_TYPE_WRBACK)) {
			pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
			return -EINVAL;
		}
	}

	/*
	 * Check upper bits of base and last are equal and lower bits are 0
	 * for base and 1 for last
	 */
	last = base + size - 1;
	for (lbase = base; !(lbase & 1) && (last & 1);
	     lbase = lbase >> 1, last = last >> 1)
		;
	if (lbase != last) {
		pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
		return -EINVAL;
	}
	return 0;
}

static int generic_have_wrcomb(void)
{
	unsigned long config, dummy;
	rdmsr(MSR_MTRRcap, config, dummy);
	return config & (1 << 10);
}

int positive_have_wrcomb(void)
{
	return 1;
}

/*
 * Generic structure...
 */
const struct mtrr_ops generic_mtrr_ops = {
	.use_intel_if		= 1,
	.set_all		= generic_set_all,
	.get			= generic_get_mtrr,
	.get_free_region	= generic_get_free_region,
	.set			= generic_set_mtrr,
	.validate_add_page	= generic_validate_add_page,
	.have_wrcomb		= generic_have_wrcomb,
};