intel_cacheinfo.c 32.2 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 *	Routines to identify caches on Intel CPU.
L
Linus Torvalds 已提交
3
 *
4 5
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
A
Alan Cox 已提交
6
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
L
Linus Torvalds 已提交
8 9 10 11 12 13 14
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
T
Tim Schmielau 已提交
15
#include <linux/sched.h>
16
#include <linux/pci.h>
L
Linus Torvalds 已提交
17 18

#include <asm/processor.h>
A
Alan Cox 已提交
19
#include <linux/smp.h>
20
#include <asm/amd_nb.h>
21
#include <asm/smp.h>
L
Linus Torvalds 已提交
22 23 24 25 26 27 28

/* Cache kinds reported by the legacy cpuid(2) descriptor table below */
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

/* One cpuid(2) descriptor-byte table entry */
struct _cache_table {
	unsigned char descriptor;	/* descriptor byte from cpuid(2) */
	char cache_type;		/* one of the LVL_* values above */
	short size;			/* size in KB */
};

D
Dave Jones 已提交
35 36
#define MB(x)	((x) * 1024)	/* table sizes are in KB */

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

/* Map from cpuid(2) descriptor byte to cache level/size; 0x00 terminates. */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2,      MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2,      512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}			/* terminator */
};


A
Alan Cox 已提交
118
/* Cache type as encoded in CPUID leaf 4, EAX[4:0] */
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

/* Bitfield view of CPUID leaf 4 EAX */
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;	/* encoded as value - 1 */
		unsigned int		num_cores_on_die:6;	/* encoded as value - 1 */
	} split;
	u32 full;
};

/* Bitfield view of CPUID leaf 4 EBX; all fields are encoded as value - 1 */
union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

/* Bitfield view of CPUID leaf 4 ECX; encoded as value - 1 */
union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

154
/* Per-node AMD L3 descriptor; see amd_init_l3_cache()/amd_calc_l3_indices() */
struct amd_l3_cache {
	struct	 amd_northbridge *nb;	/* northbridge device of this node */
	unsigned indices;		/* highest usable L3 index */
	u8	 subcaches[4];		/* per-subcache enable counts */
};

/* Per-leaf cached cpuid(4) data plus sysfs bookkeeping */
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;		/* total cache size in bytes */
	struct amd_l3_cache *l3;	/* AMD only; NULL otherwise */
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
/* NOTE: must stay a prefix of _cpuid4_info; cpuid4_cache_lookup() casts
 * between the two. */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
};

178 179 180 181
/* Number of cpuid(4) leaves; set once from the boot CPU */
unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user.  This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */

/* Layout of ECX/EDX of cpuid(0x80000005) (L1D/L1I) */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

/* Layout of ECX of cpuid(0x80000006) (L2) */
union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

/* Layout of EDX of cpuid(0x80000006) (L3) */
union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;	/* size in units of 512 KB */
	};
	unsigned val;
};

217
/* Map AMD associativity encoding to the real number of ways */
static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

/* Indexed by emulated leaf number (0=L1D, 1=L1I, 2=L2, 3=L3) */
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
233

234 235 236 237
/*
 * Emulate CPUID leaf 4 on AMD from the extended cache leaves
 * cpuid(0x80000005) (L1) and cpuid(0x80000006) (L2/L3).
 *
 * @leaf: emulated leaf index: 0 = L1D, 1 = L1I, 2 = L2, 3 = L3
 * @eax/@ebx/@ecx: filled with CPUID-4-compatible values; zeroed first
 *                 and left zero (type == CACHE_TYPE_NULL) if absent.
 */
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through - L1I and L1D use the same register layout */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		/* on multi-node CPUs the reported L3 is split between nodes */
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	/* Re-encode in CPUID-4 format (most fields are stored as value - 1) */
	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;


	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
L
Linus Torvalds 已提交
304

305 306
/* sysfs attribute with per-leaf show/store callbacks (cpu = owning CPU) */
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *, unsigned int);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count,
			 unsigned int);
};

312
#ifdef CONFIG_AMD_NB

/*
 * L3 cache descriptors
 */
/* Derive the number of usable L3 indices from the subcache-disable bits
 * in northbridge register 0x1C4. */
static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(l3->nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));
	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	/* 1024 indices per enabled subcache unit, last index reserved */
	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}

333 334
/*
 * Attach the per-node L3 descriptor to an L3 leaf (index 3).  The
 * descriptors live in a single, lazily allocated, node-indexed array
 * shared by all CPUs of a node.
 */
static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
					int index)
{
	static struct amd_l3_cache *__cpuinitdata l3_caches;
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3 || amd_nb_num() == 0)
		return;

	/*
	 * Strictly speaking, the amount in @size below is leaked since it is
	 * never freed but this is done only on shutdown so it doesn't matter.
	 */
	if (!l3_caches) {
		int size = amd_nb_num() * sizeof(struct amd_l3_cache);

		l3_caches = kzalloc(size, GFP_ATOMIC);
		if (!l3_caches)
			return;
	}

	node = amd_get_nb_id(smp_processor_id());

	/* first CPU of the node initializes the shared descriptor */
	if (!l3_caches[node].nb) {
		l3_caches[node].nb = node_to_amd_nb(node);
		amd_calc_l3_indices(&l3_caches[node]);
	}

	this_leaf->l3 = &l3_caches[node];
}

365 366 367 368 369 370 371 372 373 374 375
/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
{
	unsigned int reg = 0;

	/* northbridge IndexDisable registers: 0x1BC (slot 0) / 0x1C0 (slot 1) */
	pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

385
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
386
				  unsigned int slot)
387
{
388
	int index;
389

390 391
	if (!this_leaf->l3 ||
	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
392 393
		return -EINVAL;

394 395 396
	index = amd_get_l3_disable_slot(this_leaf->l3, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);
397

398
	return sprintf(buf, "FREE\n");
399 400
}

401
/* Generate the per-slot sysfs show callbacks (cache_disable_0/1) */
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf,	\
			  unsigned int cpu)				\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426
/* Program @idx into disable-slot @slot of all four L3 subcaches. */
static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);		/* IndexDisable "disable" bit */

	/*
	 *  disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);	/* bits 21:20 select subcache */

		if (!l3->subcaches[i])
			continue;

		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);		/* commit the disable */
		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
	}
}

441 442 443 444 445 446 447 448 449 450 451 452
/*
 * disable a L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
int amd_set_l3_disable_slot(struct amd_l3_cache *l3, int cpu, unsigned slot,
			    unsigned long index)
453
{
454
	int ret = 0;
455

456
	/*  check if @slot is already used or the index is already disabled */
457 458
	ret = amd_get_l3_disable_slot(l3, slot);
	if (ret >= 0)
459 460
		return -EINVAL;

461
	if (index > l3->indices)
462 463
		return -EINVAL;

464 465
	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(l3, !slot))
466 467 468 469 470 471 472 473 474 475 476 477 478 479
		return -EINVAL;

	amd_l3_disable_index(l3, cpu, slot, index);

	return 0;
}

/*
 * sysfs write side of cache_disable_[01]: parse a decimal index from
 * @buf and disable it via disable-slot @slot of this L3.
 * Requires CAP_SYS_ADMIN and L3 index-disable hardware support.
 */
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				  const char *buf, size_t count,
				  unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->l3 ||
	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return -EINVAL;

	/* any CPU sharing this cache can program the node's L3 */
	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(this_leaf->l3, cpu, slot, val);
	if (err) {
		/*
		 * NOTE(review): this branch is only reachable if
		 * amd_set_l3_disable_slot() actually returns -EEXIST for an
		 * occupied slot — verify its return values.
		 */
		if (err == -EEXIST)
			printk(KERN_WARNING "L3 disable slot %d in use!\n",
					    slot);
		return err;
	}
	return count;
}

502
/* Generate the per-slot sysfs store callbacks and attribute objects */
#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count,		\
			   unsigned int cpu)				\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550
/* sysfs "subcaches": read the CPU's L3 subcache assignment mask (hex) */
static ssize_t
show_subcaches(struct _cpuid4_info *this_leaf, char *buf, unsigned int cpu)
{
	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

/* sysfs "subcaches": set the CPU's L3 subcache assignment mask (hex);
 * requires CAP_SYS_ADMIN and L3 partitioning hardware support */
static ssize_t
store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
		unsigned int cpu)
{
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!this_leaf->l3 || !amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return -EINVAL;

	if (strict_strtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);

551
#else	/* CONFIG_AMD_NB */
552
#define amd_init_l3_cache(x, y)
553
#endif /* CONFIG_AMD_NB */
554

555
/*
 * Fill @this_leaf with the cpuid(4) data for cache leaf @index, using
 * the native leaf on Intel and amd_cpuid4() emulation on AMD.
 * Returns 0 on success, -EIO if the leaf is empty (CACHE_TYPE_NULL).
 */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	/* all CPUID fields are stored as value - 1, hence the +1s */
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

584
/*
 * Count the deterministic cache parameter leaves by probing cpuid(4)
 * subleaves until one reports CACHE_TYPE_NULL.
 */
static int __cpuinit find_num_cache_leaves(void)
{
	union _cpuid4_leaf_eax	cache_eax;
	unsigned int		eax, ebx, ecx, edx;
	int			leaf = 0;

	for (;;) {
		/* cpuid(4) subleaf 'leaf'; EAX[4:0] tells when to stop */
		cpuid_count(4, leaf, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
		if (cache_eax.split.type == CACHE_TYPE_NULL)
			return leaf;
		++leaf;
	}
}

599
/*
 * Detect all cache sizes for @c, preferring cpuid(4) ("deterministic
 * cache parameters") and falling back to the legacy cpuid(2) descriptor
 * table.  Sets c->x86_cache_size (largest level found, in KB) and, with
 * CONFIG_X86_HT, the per-CPU last-level-cache id.  Returns the L2 size
 * in KB.
 */
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					/* id of the cache = apicid stripped of the
					 * bits distinguishing sharers */
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	/* cpuid(4) results, when available, override the cpuid(2) sums */
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		/* L3, when present, is the last-level cache */
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

744 745
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
/* leaf y of cpu x; array is allocated in detect_cache_attributes() */
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
751
/*
 * Populate shared_cpu_map for leaf @index of @cpu (and symmetrically for
 * the siblings that share the cache).  AMD L3 (index 3) uses the
 * pre-computed llc_shared_mask; otherwise sharers are derived from the
 * num_threads_sharing field and apicid topology bits.
 */
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			/* skip CPUs whose leaf array is not allocated yet */
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			/* same cache iff apicids agree above the sharing bits */
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i))  {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
794
/* Remove @cpu from the shared_cpu_map of every sibling sharing leaf @index. */
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
/* !CONFIG_SMP: no sibling sharing maps to maintain */
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

816
/*
 * Tear down the cached cpuid4 leaf info of @cpu: drop it from all
 * siblings' shared_cpu_map and free its leaf array.
 *
 * Do NOT kfree() this_leaf->l3 here: on AMD it points into the shared,
 * node-indexed l3_caches[] array allocated in amd_init_l3_cache(), which
 * is deliberately never freed (see the comment there).  Freeing it per
 * CPU would pass an interior pointer to kfree() and release memory still
 * referenced by the other CPUs of the node.
 */
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

828 829 830 831 832 833 834 835 836
/*
 * Fill the register part of @this_leaf for cache leaf @index.
 * _cpuid4_info begins with the same members as _cpuid4_info_regs,
 * so the cast below is safe.
 */
static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	return cpuid4_cache_lookup_regs(index,
					(struct _cpuid4_info_regs *)this_leaf);
}

837
/*
 * Runs on the target CPU (via smp_call_function_single): look up every
 * cache leaf and build the sharing maps.  @_retval (int *) receives 0 on
 * success or the first lookup error; on error, sharing maps built so far
 * are rolled back.
 */
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			/* undo the sharing maps of the leaves done so far */
			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

/*
 * Allocate and fill the per-CPU cpuid4 leaf array for @cpu.
 * Returns 0 on success, -ENOENT if there are no cache leaves, -ENOMEM on
 * allocation failure, or the error from the leaf lookup (retval is
 * written by get_cpu_leaves() running on the target CPU).
 */
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int			retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		/* lookup failed: release the leaf array again */
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

/* sysfs object backing one cpuX/cache/indexY directory */
struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;	/* owning CPU */
	unsigned short index;	/* cache leaf index */
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))
L
Linus Torvalds 已提交
895 896

/* Generate a show callback printing a leaf field plus a bias (the CPUID
 * fields below are stored as value - 1, hence val = 1 for them). */
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name(struct _cpuid4_info *this_leaf, char *buf, \
				unsigned int cpu)			\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

909 910
/* sysfs "size": cache size in kilobytes, e.g. "512K" */
static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

915 916
/*
 * Print shared_cpu_map into @buf: as a hex bitmask (@type == 0) or as a
 * cpu list (@type != 0).  Output is bounded by the remainder of the
 * sysfs PAGE_SIZE buffer, reserving room for the trailing "\n\0".
 */
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	/* space left until the end of the page @buf points into */
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

934 935
/* sysfs "shared_cpu_map": hex bitmask format */
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf,
					  unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

/* sysfs "shared_cpu_list": human-readable cpu-list format */
static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf,
					   unsigned int cpu)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

946 947
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf,
			 unsigned int cpu)
948 949 950
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
L
Linus Torvalds 已提交
951
		return sprintf(buf, "Data\n");
952
	case CACHE_TYPE_INST:
L
Linus Torvalds 已提交
953
		return sprintf(buf, "Instruction\n");
954
	case CACHE_TYPE_UNIFIED:
L
Linus Torvalds 已提交
955
		return sprintf(buf, "Unified\n");
956
	default:
L
Linus Torvalds 已提交
957 958 959 960
		return sprintf(buf, "Unknown\n");
	}
}

961 962
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)
963

L
Linus Torvalds 已提交
964 965 966 967 968 969 970 971 972 973 974 975
#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
976
define_one_ro(shared_cpu_list);
L
Linus Torvalds 已提交
977

A
Alan Cox 已提交
978
static struct attribute *default_attrs[] = {
979 980 981 982 983 984 985 986 987
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
988 989 990
	NULL
};

#ifdef CONFIG_AMD_NB

/*
 * Build the attribute list for an AMD L3 cache leaf: the default
 * attributes plus whichever L3-specific controls the northbridge
 * supports.  The list is built once and cached in a static pointer,
 * so every later call returns the same array.
 *
 * Falls back to default_attrs if the allocation fails.
 */
static struct attribute ** __cpuinit amd_l3_attrs(void)
{
	static struct attribute **attrs;
	int n;

	if (attrs)
		return attrs;

	/* ARRAY_SIZE includes the NULL terminator slot */
	n = ARRAY_SIZE(default_attrs);

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	/* kcalloc zero-fills and checks the n * size product for overflow */
	attrs = kcalloc(n, sizeof(struct attribute *), GFP_KERNEL);
	if (attrs == NULL)
		return attrs = default_attrs;

	for (n = 0; default_attrs[n]; n++)
		attrs[n] = default_attrs[n];

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		attrs[n++] = &cache_disable_0.attr;
		attrs[n++] = &cache_disable_1.attr;
	}

	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		attrs[n++] = &subcaches.attr;

	return attrs;
}
#endif
L
Linus Torvalds 已提交
1026

A
Alan Cox 已提交
1027
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
L
Linus Torvalds 已提交
1028 1029 1030 1031 1032 1033 1034
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1035
			buf, this_leaf->cpu) :
1036
		0;
L
Linus Torvalds 已提交
1037 1038 1039
	return ret;
}

A
Alan Cox 已提交
1040 1041
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
L
Linus Torvalds 已提交
1042
{
1043 1044 1045 1046
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

1047 1048
	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
1049
			buf, count, this_leaf->cpu) :
1050 1051
		0;
	return ret;
L
Linus Torvalds 已提交
1052 1053
}

1054
static const struct sysfs_ops sysfs_ops = {
L
Linus Torvalds 已提交
1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

1068
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
L
Linus Torvalds 已提交
1069
{
1070 1071 1072 1073
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
L
Linus Torvalds 已提交
1074 1075 1076
	free_cache_attributes(cpu);
}

1077
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
L
Linus Torvalds 已提交
1078
{
1079
	int err;
L
Linus Torvalds 已提交
1080 1081 1082 1083

	if (num_cache_leaves == 0)
		return -ENOENT;

1084 1085 1086
	err = detect_cache_attributes(cpu);
	if (err)
		return err;
L
Linus Torvalds 已提交
1087 1088

	/* Allocate all required memory */
1089
	per_cpu(ici_cache_kobject, cpu) =
1090
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
1091
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
L
Linus Torvalds 已提交
1092 1093
		goto err_out;

1094
	per_cpu(ici_index_kobject, cpu) = kzalloc(
A
Alan Cox 已提交
1095
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
1096
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
L
Linus Torvalds 已提交
1097 1098 1099 1100 1101 1102 1103 1104 1105
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/* Tracks which CPUs currently have cache sysfs entries registered. */
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
L
Linus Torvalds 已提交
1108
/* Add/Remove cache interface for CPU device */
1109
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
L
Linus Torvalds 已提交
1110 1111 1112 1113
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
1114
	struct _cpuid4_info   *this_leaf;
1115
	int retval;
L
Linus Torvalds 已提交
1116 1117 1118 1119 1120

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

1121
	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
1122
				      &ktype_percpu_entry,
1123
				      &sys_dev->kobj, "%s", "cache");
1124 1125 1126 1127
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}
L
Linus Torvalds 已提交
1128 1129

	for (i = 0; i < num_cache_leaves; i++) {
A
Alan Cox 已提交
1130
		this_object = INDEX_KOBJECT_PTR(cpu, i);
L
Linus Torvalds 已提交
1131 1132
		this_object->cpu = cpu;
		this_object->index = i;
1133 1134 1135

		this_leaf = CPUID4_INFO_IDX(cpu, i);

1136 1137 1138 1139 1140
		ktype_cache.default_attrs = default_attrs;
#ifdef CONFIG_AMD_NB
		if (this_leaf->l3)
			ktype_cache.default_attrs = amd_l3_attrs();
#endif
1141
		retval = kobject_init_and_add(&(this_object->kobj),
1142
					      &ktype_cache,
1143
					      per_cpu(ici_cache_kobject, cpu),
1144
					      "index%1lu", i);
L
Linus Torvalds 已提交
1145
		if (unlikely(retval)) {
A
Alan Cox 已提交
1146 1147
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
1148
			kobject_put(per_cpu(ici_cache_kobject, cpu));
L
Linus Torvalds 已提交
1149
			cpuid4_cache_sysfs_exit(cpu);
1150
			return retval;
L
Linus Torvalds 已提交
1151
		}
1152
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
L
Linus Torvalds 已提交
1153
	}
1154
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
1155

1156
	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
1157
	return 0;
L
Linus Torvalds 已提交
1158 1159
}

1160
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
L
Linus Torvalds 已提交
1161 1162 1163 1164
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

1165
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
1166
		return;
1167
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
1168
		return;
1169
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
1170 1171

	for (i = 0; i < num_cache_leaves; i++)
A
Alan Cox 已提交
1172
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
1173
	kobject_put(per_cpu(ici_cache_kobject, cpu));
L
Linus Torvalds 已提交
1174
	cpuid4_cache_sysfs_exit(cpu);
1175 1176
}

1177
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1178 1179 1180 1181 1182 1183 1184 1185
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
1186
	case CPU_ONLINE_FROZEN:
1187 1188 1189
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
1190
	case CPU_DEAD_FROZEN:
1191 1192 1193 1194
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
L
Linus Torvalds 已提交
1195 1196
}

A
Alan Cox 已提交
1197
static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
1198
	.notifier_call = cacheinfo_cpu_callback,
L
Linus Torvalds 已提交
1199 1200
};

1201
static int __cpuinit cache_sysfs_init(void)
L
Linus Torvalds 已提交
1202
{
1203 1204
	int i;

L
Linus Torvalds 已提交
1205 1206 1207
	if (num_cache_leaves == 0)
		return 0;

1208
	for_each_online_cpu(i) {
1209 1210
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);
1211

1212 1213 1214
		err = cache_add_dev(sys_dev);
		if (err)
			return err;
1215
	}
1216
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1217
	return 0;
L
Linus Torvalds 已提交
1218 1219
}

device_initcall(cache_sysfs_init);

/* NOTE(review): closes a #if opened earlier in the file (outside this chunk) */
#endif