intel_cacheinfo.c 29.0 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 *	Routines to identify caches on Intel CPU.
L
Linus Torvalds 已提交
3
 *
4 5
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
A
Alan Cox 已提交
6
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
L
Linus Torvalds 已提交
8 9 10 11 12 13 14
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
T
Tim Schmielau 已提交
15
#include <linux/sched.h>
16
#include <linux/pci.h>
L
Linus Torvalds 已提交
17 18

#include <asm/processor.h>
A
Alan Cox 已提交
19
#include <linux/smp.h>
20
#include <asm/k8.h>
21
#include <asm/smp.h>
L
Linus Torvalds 已提交
22 23 24 25 26 27 28

/* Cache-level identifiers used by the legacy cpuid(2) descriptor table. */
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

/* One cpuid(2) descriptor: descriptor byte, level/type (LVL_*), size in KB. */
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}			/* table terminator */
};


A
Alan Cox 已提交
115
/* Cache type as reported in CPUID leaf 4, EAX[4:0]. */
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

/* Bitfield views of the three CPUID(4) output registers. */
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		/* All three fields are encoded as (value - 1) by CPUID. */
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

/* Full per-leaf cache description, including which CPUs share the cache. */
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;		/* total cache size in bytes */
	bool can_disable;		/* AMD L3 index-disable supported */
	unsigned int l3_indices;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	bool can_disable;
	unsigned int l3_indices;
};

171 172 173 174
/* Number of CPUID(4) cache leaves; detected once from the boot CPU. */
unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user.  This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */

/* Layout of one L1 register of CPUID 0x80000005 (ECX=L1d, EDX=L1i). */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

/* Layout of CPUID 0x80000006 ECX (L2 cache). */
union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

/* Layout of CPUID 0x80000006 EDX (L3 cache); size is encoded in 512KB units. */
union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

210
/* Map AMD's 4-bit associativity encoding to the actual way count. */
static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

/* Cache level and CPUID(4) type for emulated leaves 0..3 (l1d/l1i/l2/l3). */
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
226

227 228 229 230
/*
 * Emulate CPUID leaf 4 on AMD from the legacy cache leaves
 * 0x80000005 (L1) and 0x80000006 (L2/L3).
 *
 * @leaf: 0 = L1d, 1 = L1i, 2 = L2, 3 = L3.
 * Outputs are left zeroed when the requested cache is absent.
 */
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through - leaves 0 and 1 share the L1 decode below */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			/* multi-node CPU: report the per-node half of the L3 */
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;


	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	/* CPUID(4) encodes these fields as (value - 1). */
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
L
Linus Torvalds 已提交
297

298 299 300 301 302 303 304
/* sysfs attribute with show/store handlers typed on a _cpuid4_info leaf. */
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD
/*
 * Compute the highest usable L3 cache index from the northbridge's
 * subcache-size register (0x1C4); used to bound the cache_disable_[01]
 * sysfs interface below.
 */
static unsigned int __cpuinit amd_calc_l3_indices(void)
{
	/*
	 * We're called over smp_call_function_single() and therefore
	 * are on the correct cpu.
	 */
	int cpu = smp_processor_id();
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(dev, 0x1C4, &val);

	/* calculate subcache sizes */
	sc0 = !(val & BIT(0));
	sc1 = !(val & BIT(4));
	sc2 = !(val & BIT(8))  + !(val & BIT(9));
	sc3 = !(val & BIT(12)) + !(val & BIT(13));

	return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}

/*
 * Mark the L3 leaf (index 3) as disable-capable on families/steppings
 * that support it and record the number of L3 indices.
 */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (index < 3)
		return;

	if (boot_cpu_data.x86 == 0x11)
		return;

	/* see errata #382 and #388 */
	if ((boot_cpu_data.x86 == 0x10) &&
	    ((boot_cpu_data.x86_model < 0x8) ||
	     (boot_cpu_data.x86_mask  < 0x1)))
		return;

	/* not in virtualized environments */
	if (num_k8_northbridges == 0)
		return;

	this_leaf->can_disable = true;
	this_leaf->l3_indices  = amd_calc_l3_indices();
}

/* Read the raw L3 index-disable register for slot @index (0 or 1). */
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

/* Disable the user-supplied L3 index via slot @index; privileged. */
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
		return -EINVAL;

	val |= BIT(30);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	/*
	 * We need to WBINVD on a core on the node containing the L3 cache which
	 * indices we disable therefore a simple wbinvd() is not sufficient.
	 */
	wbinvd_on_cpu(cpu);
	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
	return count;
}

#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

#else	/* CONFIG_CPU_SUP_AMD */
/* No-op stub: L3 index disable is an AMD-only feature. */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
}
#endif /* CONFIG_CPU_SUP_AMD */
438

439
/*
 * Fill *this_leaf for cache leaf @index: CPUID(4) on Intel, emulated on
 * AMD. Returns 0 on success, -EIO when the leaf does not exist.
 */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax 	eax;
	union _cpuid4_leaf_ebx 	ebx;
	union _cpuid4_leaf_ecx 	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	/* total bytes = sets * line size * partitions * ways (all encoded -1) */
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

469
/*
 * Count the CPUID(4) cache leaves by probing successive subleaves until
 * one reports a NULL cache type.
 */
static int __cpuinit find_num_cache_leaves(void)
{
	union _cpuid4_leaf_eax	cache_eax;
	unsigned int		eax, ebx, ecx, edx;
	int			index;

	for (index = 0; ; index++) {
		cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
		if (cache_eax.split.type == CACHE_TYPE_NULL)
			break;
	}
	return index;
}

484
/*
 * Detect cache sizes for this CPU. Prefers CPUID(4) ("deterministic
 * cache parameters"); falls back to the legacy CPUID(2) descriptor
 * table, which is also used for the P4 trace cache. Sets
 * c->x86_cache_size and the per-cpu last-level-cache id; returns the
 * L2 size in KB.
 */
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					/* id of the cache = APIC id shifted past the sharing bits */
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	/* cpuid(4) results, when available, override the cpuid(2) sums. */
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	/* Report the largest cache present as "the" cache size. */
	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

629 630
#ifdef CONFIG_SYSFS

L
Linus Torvalds 已提交
631
/* pointer to _cpuid4_info array (for each cache leaf) */
632 633
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
L
Linus Torvalds 已提交
634 635

#ifdef CONFIG_SMP
/*
 * Record in this leaf's shared_cpu_map which CPUs share cache @index,
 * and add @cpu to each sibling's map in turn.
 */
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		/* AMD L3: the sharing set comes from llc_shared_map. */
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			/* sharers have identical APIC ids above index_msb */
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i))  {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
679
/* Remove @cpu from the shared_cpu_map of every sibling sharing leaf @index. */
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
L
Linus Torvalds 已提交
690 691
}
#else
/* !CONFIG_SMP: shared-cpu-map bookkeeping is a no-op. */
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

701
/* Undo detect_cache_attributes(): unlink shared maps, free the leaf array. */
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

712 713 714 715 716 717 718 719 720
/*
 * Leaf lookup for the sysfs view; valid because _cpuid4_info starts with
 * the same members as _cpuid4_info_regs.
 */
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

721
/*
 * smp_call_function_single() callback: populate all cache leaves for the
 * CPU we are running on; *_retval receives 0 or the first lookup error.
 */
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			/* roll back the leaves already wired up */
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

/*
 * Allocate and populate the per-cpu _cpuid4_info leaf array for @cpu.
 * Returns 0 on success or a negative errno.
 */
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	/* CPUID must execute on the target CPU itself. */
	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
768
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
L
Linus Torvalds 已提交
769 770 771 772 773 774 775 776

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
777 778
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
L
Linus Torvalds 已提交
779 780 781 782 783

/*
 * Generate show_<file_name>() printing this_leaf-><object> + <val>;
 * val is 1 for fields CPUID encodes as (value - 1), else 0.
 */
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

/* Cache size in KB, e.g. "256K". */
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

798 799
/*
 * Print shared_cpu_map either as a hex mask (type == 0) or as a CPU
 * list (type != 0), newline-terminated, bounded by the sysfs page.
 */
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	/* room left in the PAGE_SIZE-aligned sysfs buffer */
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

827 828 829 830
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
L
Linus Torvalds 已提交
831
		return sprintf(buf, "Data\n");
832
	case CACHE_TYPE_INST:
L
Linus Torvalds 已提交
833
		return sprintf(buf, "Instruction\n");
834
	case CACHE_TYPE_UNIFIED:
L
Linus Torvalds 已提交
835
		return sprintf(buf, "Unified\n");
836
	default:
L
Linus Torvalds 已提交
837 838 839 840
		return sprintf(buf, "Unknown\n");
	}
}

841 842
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

/* Read-only sysfs attribute backed by show_<name>(). */
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr

static struct attribute *default_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

/* The L3 leaf additionally exposes the AMD cache_disable_[01] controls. */
static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
#ifdef CONFIG_CPU_SUP_AMD
	&cache_disable_0.attr,
	&cache_disable_1.attr,
#endif
	NULL
};

A
Alan Cox 已提交
883
/* sysfs dispatch: route an attribute read to the leaf's show handler. */
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

A
Alan Cox 已提交
896 897
/* sysfs dispatch: route an attribute write to the leaf's store handler. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

910
static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

/* kobject type for each cache/indexN directory. */
static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

/* kobject type for the per-cpu "cache" directory itself (no attributes). */
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

924
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
L
Linus Torvalds 已提交
925
{
926 927 928 929
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
L
Linus Torvalds 已提交
930 931 932
	free_cache_attributes(cpu);
}

933
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
L
Linus Torvalds 已提交
934
{
935
	int err;
L
Linus Torvalds 已提交
936 937 938 939

	if (num_cache_leaves == 0)
		return -ENOENT;

940 941 942
	err = detect_cache_attributes(cpu);
	if (err)
		return err;
L
Linus Torvalds 已提交
943 944

	/* Allocate all required memory */
945
	per_cpu(ici_cache_kobject, cpu) =
946
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
947
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
L
Linus Torvalds 已提交
948 949
		goto err_out;

950
	per_cpu(ici_index_kobject, cpu) = kzalloc(
A
Alan Cox 已提交
951
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
952
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
L
Linus Torvalds 已提交
953 954 955 956 957 958 959 960 961
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* CPUs whose cache sysfs interface is currently registered. */
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	/* Detect leaves and allocate the kobjects backing the sysfs tree. */
	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	/* Create the per-CPU "cache" directory under the cpu sysdev. */
	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	/* One "index%lu" child directory per cache leaf. */
	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		/*
		 * Leaves supporting cache-index disabling get the extended
		 * attribute list.  NOTE(review): this mutates the shared
		 * ktype_cache before each add, so concurrent cache_add_dev()
		 * calls could race on default_attrs — presumably relies on
		 * hotplug serialization; confirm.
		 */
		if (this_leaf->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			/* Unwind children added so far, then the parent. */
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

1016
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
L
Linus Torvalds 已提交
1017 1018 1019 1020
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

1021
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
1022
		return;
1023
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
1024
		return;
1025
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
1026 1027

	for (i = 0; i < num_cache_leaves; i++)
A
Alan Cox 已提交
1028
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
1029
	kobject_put(per_cpu(ici_cache_kobject, cpu));
L
Linus Torvalds 已提交
1030
	cpuid4_cache_sysfs_exit(cpu);
1031 1032
}

1033
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1034 1035 1036 1037 1038 1039 1040 1041
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
1042
	case CPU_ONLINE_FROZEN:
1043 1044 1045
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
1046
	case CPU_DEAD_FROZEN:
1047 1048 1049 1050
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
L
Linus Torvalds 已提交
1051 1052
}

/* Hotplug notifier block; registered once by cache_sysfs_init(). */
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

1057
static int __cpuinit cache_sysfs_init(void)
L
Linus Torvalds 已提交
1058
{
1059 1060
	int i;

L
Linus Torvalds 已提交
1061 1062 1063
	if (num_cache_leaves == 0)
		return 0;

1064
	for_each_online_cpu(i) {
1065 1066
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);
1067

1068 1069 1070
		err = cache_add_dev(sys_dev);
		if (err)
			return err;
1071
	}
1072
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1073
	return 0;
L
Linus Torvalds 已提交
1074 1075
}

/* Register the cache sysfs interface at device-initcall time. */
device_initcall(cache_sysfs_init);

#endif