intel_cacheinfo.c 29.0 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 *	Routines to identify caches on Intel CPU.
L
Linus Torvalds 已提交
3
 *
4 5
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
A
Alan Cox 已提交
6
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
L
Linus Torvalds 已提交
8 9 10 11 12 13 14
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/smp.h>

#include <asm/processor.h>
#include <asm/k8.h>
#include <asm/smp.h>
L
Linus Torvalds 已提交
22 23 24 25 26 27 28

/* Cache-level identifiers used by the legacy cpuid(2) descriptor table. */
#define LVL_1_INST	1	/* level-1 instruction cache */
#define LVL_1_DATA	2	/* level-1 data cache */
#define LVL_2		3	/* level-2 cache */
#define LVL_3		4	/* level-3 cache */
#define LVL_TRACE	5	/* trace cache */

/* One entry of the cpuid(2) cache descriptor table below. */
struct _cache_table {
	unsigned char descriptor;	/* cpuid(2) descriptor byte */
	char cache_type;		/* one of the LVL_* constants */
	short size;			/* size in KB */
};

/* Table sizes are in KB; MB(x) converts a MB count to KB. */
#define MB(x)	((x) * 1024)

A
Alan Cox 已提交
37 38 39
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

40
static const struct _cache_table __cpuinitconst cache_table[] =
L
Linus Torvalds 已提交
41 42 43
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
44
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
L
Linus Torvalds 已提交
45 46
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
47 48
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
L
Linus Torvalds 已提交
49
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
D
Dave Jones 已提交
50 51 52
	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
L
Linus Torvalds 已提交
53 54 55
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
56
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
L
Linus Torvalds 已提交
57 58
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
59 60
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
61
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
L
Linus Torvalds 已提交
62 63 64
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
D
Dave Jones 已提交
65 66 67 68 69 70 71 72 73 74
	{ 0x44, LVL_2,      MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      MB(6) },	/* 24-way set assoc, 64 byte line size */
L
Linus Torvalds 已提交
75 76 77 78 79 80 81
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
82
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
D
Dave Jones 已提交
83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
	{ 0x78, LVL_2,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      MB(24) },	/* 24-way set assoc, 64 byte line size */
L
Linus Torvalds 已提交
111 112 113 114
	{ 0x00, 0, 0}
};


A
Alan Cox 已提交
115
enum _cache_type {
L
Linus Torvalds 已提交
116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
156 157
	bool can_disable;
	unsigned int l3_indices;
158 159 160 161 162 163 164 165 166
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
167 168
	bool can_disable;
	unsigned int l3_indices;
L
Linus Torvalds 已提交
169 170
};

171 172 173 174
unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user.  This makes some assumptions about the machine:
175
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.
176 177 178 179 180

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
A
Alan Cox 已提交
181 182 183 184
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
185 186 187 188 189 190
	};
	unsigned val;
};

union l2_cache {
	struct {
A
Alan Cox 已提交
191 192 193 194
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
195 196 197 198
	};
	unsigned val;
};

199 200
union l3_cache {
	struct {
A
Alan Cox 已提交
201 202 203 204 205
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
206 207 208 209
	};
	unsigned val;
};

210
static const unsigned short __cpuinitconst assocs[] = {
211 212 213 214 215 216 217
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
218
	[0xc] = 64,
219 220 221
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
222 223
};

224 225
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
226

227 228 229 230
/*
 * Synthesize a cpuid(4)-compatible leaf for AMD CPUs from the legacy
 * extended cpuid leaves 0x80000005/0x80000006.
 *
 * @leaf: 0 = L1D, 1 = L1I, 2 = L2, 3 = L3
 * @eax/@ebx/@ecx: filled with cpuid(4)-style values; left zeroed when
 *	the selected cache is not present (caller sees CACHE_TYPE_NULL).
 */
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through - L1I and L1D use the same register layout */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			/* multi-node (DCM) parts: report the per-node half */
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	/* cpuid(4) geometry fields are all encoded as (value - 1) */
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
L
Linus Torvalds 已提交
297

298 299 300 301 302 303 304
/* sysfs attribute for one cache property; unlike a plain attribute the
 * show/store callbacks receive the leaf's _cpuid4_info directly. */
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD
/*
 * Compute the number of usable L3 cache indices from the subcache-size
 * bits in northbridge config register 0x1C4.
 */
static unsigned int __cpuinit amd_calc_l3_indices(void)
{
	/*
	 * We're called over smp_call_function_single() and therefore
	 * are on the correct cpu.
	 */
	int cpu = smp_processor_id();
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(dev, 0x1C4, &val);

	/* calculate subcache sizes */
	sc0 = !(val & BIT(0));
	sc1 = !(val & BIT(4));
	sc2 = !(val & BIT(8))  + !(val & BIT(9));
	sc3 = !(val & BIT(12)) + !(val & BIT(13));

	return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}

/*
 * Mark the L3 leaf as supporting index disable, but only on family 0x10
 * steppings where it is known to work and only on real hardware.
 */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (boot_cpu_data.x86 != 0x10)
		return;

	/* only the L3 (index 3) supports index disable */
	if (index < 3)
		return;

	/* see errata #382 and #388 */
	if (boot_cpu_data.x86_model < 0x8)
		return;

	if ((boot_cpu_data.x86_model == 0x8 ||
	     boot_cpu_data.x86_model == 0x9) &&
	    boot_cpu_data.x86_mask < 0x1)
		return;

	/* not in virtualized environments */
	if (num_k8_northbridges == 0)
		return;

	this_leaf->can_disable = true;
	this_leaf->l3_indices  = amd_calc_l3_indices();
}

/* Show the raw L3 index-disable register for slot @index (0 or 1). */
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

/* Disable the user-supplied L3 cache index via disable slot @index. */
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
		return -EINVAL;

	val |= BIT(30);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	/*
	 * We need to WBINVD on a core on the node containing the L3 cache which
	 * indices we disable therefore a simple wbinvd() is not sufficient.
	 */
	wbinvd_on_cpu(cpu);
	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
	return count;
}

#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

#else	/* CONFIG_CPU_SUP_AMD */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
}
#endif /* CONFIG_CPU_SUP_AMD */
442

443
static int
444 445
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
L
Linus Torvalds 已提交
446
{
447 448 449 450
	union _cpuid4_leaf_eax 	eax;
	union _cpuid4_leaf_ebx 	ebx;
	union _cpuid4_leaf_ecx 	ecx;
	unsigned		edx;
L
Linus Torvalds 已提交
451

452
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
453
		amd_cpuid4(index, &eax, &ebx, &ecx);
454
		amd_check_l3_disable(index, this_leaf);
455 456 457 458
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

459
	if (eax.split.type == CACHE_TYPE_NULL)
460
		return -EIO; /* better error ? */
L
Linus Torvalds 已提交
461

462 463 464
	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
465 466 467 468
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
L
Linus Torvalds 已提交
469 470 471
	return 0;
}

472
/* Count cpuid(4) cache leaves by probing until a NULL-type leaf. */
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int 			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

487
/*
 * Detect the cache sizes of @c, preferring the deterministic cpuid(4)
 * leaves and falling back to the legacy cpuid(2) descriptor table.
 * Sets c->x86_cache_size (and cpu_llc_id under CONFIG_X86_HT) and
 * returns the L2 size in KB.
 */
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	/* cpuid(4) results take precedence over the descriptor table */
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

632 633
#ifdef CONFIG_SYSFS

L
Linus Torvalds 已提交
634
/* pointer to _cpuid4_info array (for each cache leaf) */
635 636
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))
L
Linus Torvalds 已提交
637 638

#ifdef CONFIG_SMP
639
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
L
Linus Torvalds 已提交
640
{
641
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
L
Linus Torvalds 已提交
642
	unsigned long num_threads_sharing;
643
	int index_msb, i, sibling;
644
	struct cpuinfo_x86 *c = &cpu_data(cpu);
L
Linus Torvalds 已提交
645

646
	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
647
		for_each_cpu(i, c->llc_shared_map) {
648
			if (!per_cpu(ici_cpuid4_info, i))
649 650
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
651 652 653 654 655
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
656 657 658
		}
		return;
	}
L
Linus Torvalds 已提交
659 660 661 662
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
663
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
664 665 666 667
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
668 669
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
670 671
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
672
				if (i != cpu && per_cpu(ici_cpuid4_info, i))  {
673 674 675 676
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
677 678 679 680 681
				}
			}
		}
	}
}
682
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
683 684 685 686 687
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
688
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
689
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
690 691
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
692
	}
L
Linus Torvalds 已提交
693 694
}
#else
A
Alan Cox 已提交
695 696 697 698 699 700 701
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
L
Linus Torvalds 已提交
702 703
#endif

704
/* Tear down @cpu's leaf array, unlinking it from sibling maps first. */
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

715 716 717 718 719 720 721 722 723
/*
 * Wrapper around cpuid4_cache_lookup_regs() for a full _cpuid4_info:
 * the leading members of _cpuid4_info match _cpuid4_info_regs, so the
 * register lookup can safely operate on the shared prefix.
 */
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	return cpuid4_cache_lookup_regs(index,
					(struct _cpuid4_info_regs *)this_leaf);
}

724
/*
 * smp_call_function_single() callback: fill in all cache leaves of the
 * current CPU and wire up the sharing maps.  *@_retval receives 0 on
 * success or the first lookup error; on error, sharing-map entries made
 * so far are rolled back.
 */
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			/* roll back the sharing maps set up so far */
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

/*
 * Allocate @cpu's leaf array and populate it on that CPU via
 * get_cpu_leaves().  Returns 0 on success or a negative errno; the
 * allocation is released again if the remote probe fails.
 */
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
771
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);
L
Linus Torvalds 已提交
772 773 774 775 776 777 778 779

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
780 781
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
L
Linus Torvalds 已提交
782 783 784 785 786

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
A
Alan Cox 已提交
787
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
L
Linus Torvalds 已提交
788 789 790 791 792 793 794 795 796 797
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
A
Alan Cox 已提交
798
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
L
Linus Torvalds 已提交
799 800
}

801 802
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
L
Linus Torvalds 已提交
803
{
804
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
805 806
	int n = 0;

807
	if (len > 1) {
808
		const struct cpumask *mask;
809

810
		mask = to_cpumask(this_leaf->shared_cpu_map);
A
Alan Cox 已提交
811
		n = type ?
812 813
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
814 815
		buf[n++] = '\n';
		buf[n] = '\0';
816 817
	}
	return n;
L
Linus Torvalds 已提交
818 819
}

820 821 822 823 824 825 826 827 828 829
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

830 831 832 833
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
L
Linus Torvalds 已提交
834
		return sprintf(buf, "Data\n");
835
	case CACHE_TYPE_INST:
L
Linus Torvalds 已提交
836
		return sprintf(buf, "Instruction\n");
837
	case CACHE_TYPE_UNIFIED:
L
Linus Torvalds 已提交
838
		return sprintf(buf, "Unified\n");
839
	default:
L
Linus Torvalds 已提交
840 841 842 843
		return sprintf(buf, "Unknown\n");
	}
}

844 845
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
846

L
Linus Torvalds 已提交
847 848 849 850 851 852 853 854 855 856 857 858
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
859
define_one_ro(shared_cpu_list);
L
Linus Torvalds 已提交
860

861 862 863 864 865 866 867 868 869 870
#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr
871

A
Alan Cox 已提交
872
static struct attribute *default_attrs[] = {
873 874 875 876 877 878
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
879
#ifdef CONFIG_CPU_SUP_AMD
880 881
	&cache_disable_0.attr,
	&cache_disable_1.attr,
882
#endif
L
Linus Torvalds 已提交
883 884 885
	NULL
};

A
Alan Cox 已提交
886
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
L
Linus Torvalds 已提交
887 888 889 890 891 892 893 894
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
895
		0;
L
Linus Torvalds 已提交
896 897 898
	return ret;
}

A
Alan Cox 已提交
899 900
/* sysfs store dispatcher: route to the _cache_attr's store callback
 * with the _cpuid4_info of the kobject's cpu/index; 0 if read-only. */
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

913
static const struct sysfs_ops sysfs_ops = {
L
Linus Torvalds 已提交
914 915 916 917 918 919 920 921 922 923 924 925 926
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

927
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
L
Linus Torvalds 已提交
928
{
929 930 931 932
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
L
Linus Torvalds 已提交
933 934 935
	free_cache_attributes(cpu);
}

936
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
L
Linus Torvalds 已提交
937
{
938
	int err;
L
Linus Torvalds 已提交
939 940 941 942

	if (num_cache_leaves == 0)
		return -ENOENT;

943 944 945
	err = detect_cache_attributes(cpu);
	if (err)
		return err;
L
Linus Torvalds 已提交
946 947

	/* Allocate all required memory */
948
	per_cpu(ici_cache_kobject, cpu) =
949
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
950
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
L
Linus Torvalds 已提交
951 952
		goto err_out;

953
	per_cpu(ici_index_kobject, cpu) = kzalloc(
A
Alan Cox 已提交
954
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
955
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
L
Linus Torvalds 已提交
956 957 958 959 960 961 962 963 964
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* Tracks which cpus currently have the cache sysfs interface registered. */
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
L
Linus Torvalds 已提交
967
/* Add/Remove cache interface for CPU device */
968
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
L
Linus Torvalds 已提交
969 970 971 972
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
973
	struct _cpuid4_info   *this_leaf;
974
	int retval;
L
Linus Torvalds 已提交
975 976 977 978 979

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

980
	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
981
				      &ktype_percpu_entry,
982
				      &sys_dev->kobj, "%s", "cache");
983 984 985 986
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}
L
Linus Torvalds 已提交
987 988

	for (i = 0; i < num_cache_leaves; i++) {
A
Alan Cox 已提交
989
		this_object = INDEX_KOBJECT_PTR(cpu, i);
L
Linus Torvalds 已提交
990 991
		this_object->cpu = cpu;
		this_object->index = i;
992 993 994 995 996 997 998 999

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		if (this_leaf->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

1000
		retval = kobject_init_and_add(&(this_object->kobj),
1001
					      &ktype_cache,
1002
					      per_cpu(ici_cache_kobject, cpu),
1003
					      "index%1lu", i);
L
Linus Torvalds 已提交
1004
		if (unlikely(retval)) {
A
Alan Cox 已提交
1005 1006
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
1007
			kobject_put(per_cpu(ici_cache_kobject, cpu));
L
Linus Torvalds 已提交
1008
			cpuid4_cache_sysfs_exit(cpu);
1009
			return retval;
L
Linus Torvalds 已提交
1010
		}
1011
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
L
Linus Torvalds 已提交
1012
	}
1013
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
1014

1015
	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
1016
	return 0;
L
Linus Torvalds 已提交
1017 1018
}

1019
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
L
Linus Torvalds 已提交
1020 1021 1022 1023
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

1024
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
1025
		return;
1026
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
1027
		return;
1028
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
1029 1030

	for (i = 0; i < num_cache_leaves; i++)
A
Alan Cox 已提交
1031
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
1032
	kobject_put(per_cpu(ici_cache_kobject, cpu));
L
Linus Torvalds 已提交
1033
	cpuid4_cache_sysfs_exit(cpu);
1034 1035
}

1036
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1037 1038 1039 1040 1041 1042 1043 1044
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
1045
	case CPU_ONLINE_FROZEN:
1046 1047 1048
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
1049
	case CPU_DEAD_FROZEN:
1050 1051 1052 1053
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
L
Linus Torvalds 已提交
1054 1055
}

A
Alan Cox 已提交
1056
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1057
	.notifier_call = cacheinfo_cpu_callback,
L
Linus Torvalds 已提交
1058 1059
};

1060
static int __cpuinit cache_sysfs_init(void)
L
Linus Torvalds 已提交
1061
{
1062 1063
	int i;

L
Linus Torvalds 已提交
1064 1065 1066
	if (num_cache_leaves == 0)
		return 0;

1067
	for_each_online_cpu(i) {
1068 1069
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);
1070

1071 1072 1073
		err = cache_add_dev(sys_dev);
		if (err)
			return err;
1074
	}
1075
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1076
	return 0;
L
Linus Torvalds 已提交
1077 1078
}

device_initcall(cache_sysfs_init);

#endif