Commit 68091ee7 authored by Suravee Suthikulpanit, committed by Thomas Gleixner

x86/CPU/AMD: Calculate last level cache ID from number of sharing threads

Last Level Cache ID can be calculated from the number of threads sharing
the cache, which is available from CPUID Fn0x8000001D (Cache Properties).
This count determines how many bits to right-shift the APIC ID to derive the LLC ID.

Therefore, default to this method unless the APIC ID enumeration does not
follow the scheme.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1524864877-111962-5-git-send-email-suravee.suthikulpanit@amd.com
Parent 1d200c07
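
To make the derivation concrete, here is a small userspace sketch (not part of the patch) that performs the calculation the commit message describes. It is a sketch under several assumptions: a reasonably recent GCC or Clang for the __get_cpuid helpers in <cpuid.h>, an AMD processor that implements CPUID Fn0x8000001D, the initial APIC ID taken from leaf 1 EBX[31:24] (large systems would need the extended APIC ID from Fn0x8000001E), and subleaf 3 hardcoded for the L3, whereas the kernel walks the cache leaves via find_num_cache_leaves() instead.

#include <stdio.h>
#include <cpuid.h>

/* ceil(log2(n)): mirrors the kernel's get_count_order(). */
static int count_order(unsigned int n)
{
        int order = 31 - __builtin_clz(n);

        return (n & (n - 1)) ? order + 1 : order;
}

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        unsigned int apicid, num_sharing_cache, bits;

        /* Initial APIC ID of the current CPU: CPUID Fn0000_0001 EBX[31:24]. */
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;
        apicid = ebx >> 24;

        /*
         * Fn0x8000001D, subleaf 3 (the L3 on typical Zen parts):
         * EAX[25:14] is the number of threads sharing the cache, minus 1.
         */
        if (!__get_cpuid_count(0x8000001d, 3, &eax, &ebx, &ecx, &edx) || !eax) {
                puts("CPUID Fn0x8000001D not usable on this CPU");
                return 1;
        }
        num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

        /* Shift off the APIC ID bits that enumerate the sharing threads. */
        bits = count_order(num_sharing_cache);
        printf("APIC ID %u, %u sharing threads -> LLC ID %u\n",
               apicid, num_sharing_cache, apicid >> bits);
        return 0;
}
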
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CACHEINFO_H
#define _ASM_X86_CACHEINFO_H
void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
#endif /* _ASM_X86_CACHEINFO_H */
......@@ -9,6 +9,7 @@
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
......@@ -343,22 +344,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
                                c->x86_max_cores /= smp_num_siblings;
                }

                /*
                 * We may have multiple LLCs if L3 caches exist, so check if we
                 * have an L3 cache by looking at the L3 cache CPUID leaf.
                 */
                if (cpuid_edx(0x80000006)) {
                        if (c->x86 == 0x17) {
                                /*
                                 * LLC is at the core complex level.
                                 * Core complex id is ApicId[3].
                                 */
                                per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
                        } else {
                                /* LLC is at the node level. */
                                per_cpu(cpu_llc_id, cpu) = node_id;
                        }
                }

                cacheinfo_amd_init_llc_id(c, cpu, node_id);
        } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
                u64 value;
......
......@@ -637,6 +637,45 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
        return i;
}

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
{
        /*
         * We may have multiple LLCs if L3 caches exist, so check if we
         * have an L3 cache by looking at the L3 cache CPUID leaf.
         */
        if (!cpuid_edx(0x80000006))
                return;

        if (c->x86 < 0x17) {
                /* LLC is at the node level. */
                per_cpu(cpu_llc_id, cpu) = node_id;
        } else if (c->x86 == 0x17 &&
                   c->x86_model >= 0 && c->x86_model <= 0x1F) {
                /*
                 * LLC is at the core complex level.
                 * Core complex ID is ApicId[3] for these processors.
                 */
                per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
        } else {
                /*
                 * LLC ID is calculated from the number of threads sharing the
                 * cache.
                 */
                u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
                u32 llc_index = find_num_cache_leaves(c) - 1;

                cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
                if (eax)
                        num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

                if (num_sharing_cache) {
                        int bits = get_count_order(num_sharing_cache);

                        per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
                }
        }
}

void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
......
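
As a hypothetical worked example of the intended derivation on the new default path: on a family 17h part outside the 00h-1Fh model range where eight threads share each L3, CPUID Fn0x8000001D reports NumSharingCache = 7 in EAX[25:14], so num_sharing_cache = 8, the shift count works out to get_count_order(8) = 3, and the LLC ID becomes ApicId >> 3. That matches the ApicId[3] boundary the hardcoded branch uses for models 00h-1Fh; the difference is that the computed shift tracks whatever sharing count the cache leaf reports instead of a fixed bit position.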