Commit 537488db authored by Liu Peibao, committed by openeuler-sync-bot

LoongArch: convert p v s cache to l1 l2 l3 format

LoongArch inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6BWFP

--------------------------------

Change-Id: I0d38cd552c26b92a587465d3b5fb5e992f67ff9b
Signed-off-by: Liu Peibao <liupeibao@loongson.cn>
(cherry picked from commit eb8c6f75)
Parent cbf1b67a
diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h
@@ -9,8 +9,11 @@
 #include <asm/cpu-features.h>
 #include <asm/cacheops.h>
 
-extern void local_flush_icache_range(unsigned long start, unsigned long end);
+void local_flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_line_hit(unsigned long addr);
+asmlinkage void cpu_flush_caches(void);
+
+#define invalid_cache_line_hit(addr)	flush_cache_line_hit(addr)
 
 #define flush_icache_range	local_flush_icache_range
 #define flush_icache_user_range	local_flush_icache_range
@@ -35,46 +38,26 @@ extern void local_flush_icache_range(unsigned long start, unsigned long end);
 	: \
 	: "i" (op), "ZC" (*(unsigned char *)(addr)))
 
-static inline void flush_icache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Invalidate_I, addr);
-}
-
-static inline void flush_dcache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Writeback_Inv_D, addr);
-}
-
-static inline void flush_vcache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Writeback_Inv_V, addr);
-}
-
-static inline void flush_scache_line_indexed(unsigned long addr)
-{
-	cache_op(Index_Writeback_Inv_S, addr);
-}
-
-static inline void flush_icache_line(unsigned long addr)
-{
-	cache_op(Hit_Invalidate_I, addr);
-}
-
-static inline void flush_dcache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_D, addr);
-}
-
-static inline void flush_vcache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_V, addr);
-}
-
-static inline void flush_scache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_S, addr);
-}
+static inline bool cache_present(struct cache_desc *cdesc)
+{
+	return cdesc->flags & CACHE_PRESENT;
+}
+
+static inline bool cache_private(struct cache_desc *cdesc)
+{
+	return cdesc->flags & CACHE_PRIVATE;
+}
+
+static inline bool cache_inclusive(struct cache_desc *cdesc)
+{
+	return cdesc->flags & CACHE_INCLUSIVE;
+}
+
+static inline unsigned int cpu_last_level_cache_line_size(void)
+{
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	return current_cpu_data.cache_leaves[cache_present - 1].linesz;
+}
 
 #include <asm-generic/cacheflush.h>
 
 #endif /* _ASM_CACHEFLUSH_H */
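For readers skimming the patch, the net effect of this header change is that cache queries now walk a flat array of leaves instead of the named icache/dcache/vcache/scache fields. Below is a standalone sketch of that model (illustrative only: the descriptor mirrors the patch, but the sample topology in `leaves[]` is invented, not read from hardware):

```c
#include <stdio.h>

/* Flag bits and descriptor layout mirrored from the patch. */
#define CACHE_PRESENT	(1 << 0)
#define CACHE_PRIVATE	(1 << 1)
#define CACHE_INCLUSIVE	(1 << 2)

struct cache_desc {
	unsigned char type;
	unsigned char level;
	unsigned short sets;
	unsigned char ways;
	unsigned char linesz;
	unsigned char flags;
};

int main(void)
{
	/* Invented sample topology: split L1, private L2, inclusive shared L3. */
	struct cache_desc leaves[] = {
		{ .level = 1, .sets = 256,   .ways = 4,  .linesz = 64,
		  .flags = CACHE_PRESENT | CACHE_PRIVATE },	/* L1 I */
		{ .level = 1, .sets = 256,   .ways = 4,  .linesz = 64,
		  .flags = CACHE_PRESENT | CACHE_PRIVATE },	/* L1 D */
		{ .level = 2, .sets = 256,   .ways = 16, .linesz = 64,
		  .flags = CACHE_PRESENT | CACHE_PRIVATE },	/* L2 */
		{ .level = 3, .sets = 16384, .ways = 16, .linesz = 64,
		  .flags = CACHE_PRESENT | CACHE_INCLUSIVE },	/* L3 */
	};
	unsigned int present = sizeof(leaves) / sizeof(leaves[0]);

	/* Same indexing as cpu_last_level_cache_line_size(): the last leaf. */
	printf("last level cache line size: %u bytes\n",
	       (unsigned int)leaves[present - 1].linesz);
	return 0;
}
```

The last element plays the role that the `vcache`/`scache` fallback chain used to play in the old `pcibios_init()` logic further down.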
diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h
@@ -8,16 +8,18 @@
 #define __ASM_CACHEOPS_H
 
 /*
- * Most cache ops are split into a 2 bit field identifying the cache, and a 3
+ * Most cache ops are split into a 3 bit field identifying the cache, and a 2
  * bit field identifying the cache operation.
  */
-#define CacheOp_Cache		0x03
-#define CacheOp_Op		0x1c
+#define CacheOp_Cache		0x07
+#define CacheOp_Op		0x18
 
-#define Cache_I			0x00
-#define Cache_D			0x01
-#define Cache_V			0x02
-#define Cache_S			0x03
+#define Cache_LEAF0		0x00
+#define Cache_LEAF1		0x01
+#define Cache_LEAF2		0x02
+#define Cache_LEAF3		0x03
+#define Cache_LEAF4		0x04
+#define Cache_LEAF5		0x05
 
 #define Index_Invalidate	0x08
 #define Index_Writeback_Inv	0x08
@@ -25,13 +27,17 @@
 #define Hit_Writeback_Inv	0x10
 #define CacheOp_User_Defined	0x18
 
-#define Index_Invalidate_I	(Cache_I | Index_Invalidate)
-#define Index_Writeback_Inv_D	(Cache_D | Index_Writeback_Inv)
-#define Index_Writeback_Inv_V	(Cache_V | Index_Writeback_Inv)
-#define Index_Writeback_Inv_S	(Cache_S | Index_Writeback_Inv)
-#define Hit_Invalidate_I	(Cache_I | Hit_Invalidate)
-#define Hit_Writeback_Inv_D	(Cache_D | Hit_Writeback_Inv)
-#define Hit_Writeback_Inv_V	(Cache_V | Hit_Writeback_Inv)
-#define Hit_Writeback_Inv_S	(Cache_S | Hit_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF0	(Cache_LEAF0 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF1	(Cache_LEAF1 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF2	(Cache_LEAF2 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF3	(Cache_LEAF3 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF4	(Cache_LEAF4 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF5	(Cache_LEAF5 | Index_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF0		(Cache_LEAF0 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF1		(Cache_LEAF1 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF2		(Cache_LEAF2 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF3		(Cache_LEAF3 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF4		(Cache_LEAF4 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF5		(Cache_LEAF5 | Hit_Writeback_Inv)
 
 #endif /* __ASM_CACHEOPS_H */
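Since the low three bits now select the cache leaf and bits 3~4 select the operation, every `*_LEAFn` opcode is just a bitwise OR of the two fields. A tiny self-check of that arithmetic, written as ordinary userspace C with the constants copied from the header above:

```c
#include <assert.h>

#define CacheOp_Cache		0x07
#define CacheOp_Op		0x18

#define Cache_LEAF1		0x01
#define Hit_Writeback_Inv	0x10
#define Hit_Writeback_Inv_LEAF1	(Cache_LEAF1 | Hit_Writeback_Inv)

int main(void)
{
	/* 0x01 | 0x10 == 0x11: leaf in bits 0~2, operation in bits 3~4. */
	assert(Hit_Writeback_Inv_LEAF1 == 0x11);
	assert((Hit_Writeback_Inv_LEAF1 & CacheOp_Cache) == Cache_LEAF1);
	assert((Hit_Writeback_Inv_LEAF1 & CacheOp_Op) == Hit_Writeback_Inv);
	return 0;
}
```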
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
@@ -19,11 +19,6 @@
 #define cpu_has_loongarch32	(cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
 #define cpu_has_loongarch64	(cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
 
-#define cpu_icache_line_size()	cpu_data[0].icache.linesz
-#define cpu_dcache_line_size()	cpu_data[0].dcache.linesz
-#define cpu_vcache_line_size()	cpu_data[0].vcache.linesz
-#define cpu_scache_line_size()	cpu_data[0].scache.linesz
-
 #ifdef CONFIG_32BIT
 # define cpu_has_64bits		(cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
 # define cpu_vabits		31
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
@@ -9,19 +9,28 @@
 #include <linux/types.h>
 
 #include <asm/loongarch.h>
 
+/* cache_desc->flags */
+enum {
+	CACHE_PRESENT	= (1 << 0),
+	CACHE_PRIVATE	= (1 << 1),	/* core private cache */
+	CACHE_INCLUSIVE	= (1 << 2),	/* include the lower level caches */
+};
+
 /*
  * Descriptor for a cache
  */
 struct cache_desc {
-	unsigned int waysize;	/* Bytes per way */
+	unsigned char type;
+	unsigned char level;
 	unsigned short sets;	/* Number of lines per set */
 	unsigned char ways;	/* Number of ways */
 	unsigned char linesz;	/* Size of line in bytes */
-	unsigned char waybit;	/* Bits to select in a cache set */
 	unsigned char flags;	/* Flags describing cache properties */
 };
 
+#define CACHE_LEAVES_MAX	6
+#define CACHE_LEVEL_MAX		3
+
 struct cpuinfo_loongarch {
 	u64 asid_cache;
 	unsigned long asid_mask;
@@ -40,11 +49,8 @@ struct cpuinfo_loongarch {
 	int tlbsizemtlb;
 	int tlbsizestlbsets;
 	int tlbsizestlbways;
-	struct cache_desc icache;	/* Primary I-cache */
-	struct cache_desc dcache;	/* Primary D or combined I/D cache */
-	struct cache_desc vcache;	/* Victim cache, between pcache and scache */
-	struct cache_desc scache;	/* Secondary cache */
-	struct cache_desc tcache;	/* Tertiary/split secondary cache */
+	unsigned int cache_leaves_present;	/* number of cache_leaves[] elements */
+	struct cache_desc cache_leaves[CACHE_LEAVES_MAX];
 	int core;	/* physical core number in package */
 	int package;	/* physical package number */
 	int vabits;	/* Virtual Address size in bits */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
@@ -224,6 +224,13 @@ static inline u32 read_cpucfg(u32 reg)
 #define  CPUCFG48_VFPU_CG	BIT(2)
 #define  CPUCFG48_RAM_CG	BIT(3)
 
+#define CACHE_WAYS_M	GENMASK(15, 0)
+#define CACHE_SETS_M	GENMASK(23, 16)
+#define CACHE_LSIZE_M	GENMASK(30, 24)
+
+#define CACHE_WAYS	0
+#define CACHE_SETS	16
+#define CACHE_LSIZE	24
+
 #ifndef __ASSEMBLY__
 
 /* CSR */
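These masks encode the layout of the per-leaf config words read from `LOONGARCH_CPUCFG17 + leaf`: ways-minus-one in bits 0~15, log2(sets) in bits 16~23, and log2(line size) in bits 24~30. A standalone decode sketch mirroring `set_cache_basics()` from later in the patch (the `config` value is made up for illustration, not real hardware output):

```c
#include <stdio.h>

#define CACHE_WAYS_M	0x0000ffffu	/* GENMASK(15, 0) */
#define CACHE_SETS_M	0x00ff0000u	/* GENMASK(23, 16) */
#define CACHE_LSIZE_M	0x7f000000u	/* GENMASK(30, 24) */
#define CACHE_WAYS	0
#define CACHE_SETS	16
#define CACHE_LSIZE	24

int main(void)
{
	/* Invented leaf config: log2(linesz)=6, log2(sets)=8, ways-1=3. */
	unsigned int config = (6u << CACHE_LSIZE) | (8u << CACHE_SETS) | 3u;

	unsigned int linesz = 1u << ((config & CACHE_LSIZE_M) >> CACHE_LSIZE);
	unsigned int sets = 1u << ((config & CACHE_SETS_M) >> CACHE_SETS);
	unsigned int ways = ((config & CACHE_WAYS_M) >> CACHE_WAYS) + 1;

	printf("%u bytes/line, %u sets, %u ways, %u KiB total\n",
	       linesz, sets, ways, linesz * sets * ways / 1024);
	return 0;
}
```

With these inputs the sketch reports 64 bytes/line, 256 sets, 4 ways, i.e. a 64 KiB leaf.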
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
@@ -5,69 +5,28 @@
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
 #include <linux/cacheinfo.h>
+#include <linux/of.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu-info.h>
 
-/* Populates leaf and increments to next leaf */
-#define populate_cache(cache, leaf, c_level, c_type)		\
-do {								\
-	leaf->type = c_type;					\
-	leaf->level = c_level;					\
-	leaf->coherency_line_size = c->cache.linesz;		\
-	leaf->number_of_sets = c->cache.sets;			\
-	leaf->ways_of_associativity = c->cache.ways;		\
-	leaf->size = c->cache.linesz * c->cache.sets *		\
-		c->cache.ways;					\
-	if (leaf->level > 2)					\
-		leaf->size *= nodes_per_package;		\
-	leaf++;							\
-} while (0)
-
 int init_cache_level(unsigned int cpu)
 {
-	struct cpuinfo_loongarch *c = &current_cpu_data;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-	int levels = 0, leaves = 0;
-
-	/*
-	 * If Dcache is not set, we assume the cache structures
-	 * are not properly initialized.
-	 */
-	if (c->dcache.waysize)
-		levels += 1;
-	else
-		return -ENOENT;
-
-	leaves += (c->icache.waysize) ? 2 : 1;
-
-	if (c->vcache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	if (c->scache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	if (c->tcache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	this_cpu_ci->num_levels = levels;
-	this_cpu_ci->num_leaves = leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	this_cpu_ci->num_levels =
+		current_cpu_data.cache_leaves[cache_present - 1].level;
+	this_cpu_ci->num_leaves = cache_present;
+
 	return 0;
 }
 
 static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
 					   struct cacheinfo *sib_leaf)
 {
-	return !((this_leaf->level == 1) || (this_leaf->level == 2));
+	return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE) &&
+		!(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
 }
 
-static void cache_cpumap_setup(unsigned int cpu)
+static void __cache_cpumap_setup(unsigned int cpu)
 {
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf, *sib_leaf;
@@ -85,8 +44,11 @@ static void cache_cpumap_setup(unsigned int cpu)
 	for_each_online_cpu(i) {
 		struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
 
-		if (i == cpu || !sib_cpu_ci->info_list)
-			continue;/* skip if itself or no cacheinfo */
+		/*
+		 * Skip if it is the same CPU, has no cacheinfo,
+		 * or is not in the same physical node.
+		 */
+		if (i == cpu || !sib_cpu_ci->info_list ||
+		    (cpu_to_node(i) != cpu_to_node(cpu)))
+			continue;
 		sib_leaf = sib_cpu_ci->info_list + index;
 		if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
 			cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
@@ -98,33 +60,30 @@
 int populate_cache_leaves(unsigned int cpu)
 {
-	int level = 1, nodes_per_package = 1;
-	struct cpuinfo_loongarch *c = &current_cpu_data;
+	struct cache_desc *cdesc_tmp, *cdesc = current_cpu_data.cache_leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+	int i;
 
-	if (loongson_sysconf.nr_nodes > 1)
-		nodes_per_package = loongson_sysconf.cores_per_package
-					/ loongson_sysconf.cores_per_node;
-
-	if (c->icache.waysize) {
-		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
-		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
-	} else {
-		populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+	for (i = 0; i < cache_present; i++) {
+		cdesc_tmp = cdesc + i;
+
+		this_leaf->type = cdesc_tmp->type;
+		this_leaf->level = cdesc_tmp->level;
+		this_leaf->coherency_line_size = cdesc_tmp->linesz;
+		this_leaf->number_of_sets = cdesc_tmp->sets;
+		this_leaf->ways_of_associativity = cdesc_tmp->ways;
+		this_leaf->size =
+			cdesc_tmp->linesz * cdesc_tmp->sets * cdesc_tmp->ways;
+		this_leaf->priv = &cdesc_tmp->flags;
+		this_leaf++;
 	}
 
-	if (c->vcache.waysize)
-		populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	if (c->scache.waysize)
-		populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	if (c->tcache.waysize)
-		populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	cache_cpumap_setup(cpu);
-	this_cpu_ci->cpu_map_populated = true;
+	if (!of_have_populated_dt()) {
+		__cache_cpumap_setup(cpu);
+		this_cpu_ci->cpu_map_populated = true;
+	}
 
 	return 0;
 }
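The `priv` pointer stored in each leaf is what makes the new `cache_leaves_are_shared()` work: the generic cacheinfo layer hands back two leaves, and the predicate links their CPUs only if neither side is core-private. A minimal userspace model of that predicate (illustrative; the flag value is copied from cpu-info.h above):

```c
#include <stdbool.h>
#include <stdio.h>

#define CACHE_PRIVATE	(1 << 1)

/* Mirrors cache_leaves_are_shared(): both leaves must be non-private. */
static bool leaves_shared(unsigned char this_flags, unsigned char sib_flags)
{
	return !(this_flags & CACHE_PRIVATE) && !(sib_flags & CACHE_PRIVATE);
}

int main(void)
{
	printf("private L1 vs private L1: %d\n",
	       leaves_shared(CACHE_PRIVATE, CACHE_PRIVATE));	/* 0 */
	printf("shared L3 vs shared L3: %d\n",
	       leaves_shared(0, 0));				/* 1 */
	return 0;
}
```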
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  *
  * Derived from MIPS:
  * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2007 MIPS Technologies, Inc.
  */
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
@@ -15,127 +15,225 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/syscalls.h>
+#include <linux/cacheinfo.h>
 
+#include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/bootinfo.h>
 #include <asm/cpu-features.h>
 #include <asm/dma.h>
 #include <asm/loongarch.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 
+extern struct loongson_system_configuration loongson_sysconf;
+
+void cache_error_setup(void)
+{
+	extern char __weak except_vec_cex;
+	set_merr_handler(0x0, &except_vec_cex, 0x80);
+}
+
-/*
- * LoongArch maintains ICache/DCache coherency by hardware,
- * we just need "ibar" to avoid instruction hazard here.
- */
+/* Cache operations. */
 void local_flush_icache_range(unsigned long start, unsigned long end)
 {
	asm volatile ("\tibar 0\n"::);
 }
 EXPORT_SYMBOL(local_flush_icache_range);
 
-void cache_error_setup(void)
-{
-	extern char __weak except_vec_cex;
-	set_merr_handler(0x0, &except_vec_cex, 0x80);
-}
-
-static unsigned long icache_size __read_mostly;
-static unsigned long dcache_size __read_mostly;
-static unsigned long vcache_size __read_mostly;
-static unsigned long scache_size __read_mostly;
-
-static char *way_string[] = { NULL, "direct mapped", "2-way",
-	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
-	"9-way", "10-way", "11-way", "12-way",
-	"13-way", "14-way", "15-way", "16-way",
-};
-
-static void probe_pcache(void)
-{
-	struct cpuinfo_loongarch *c = &current_cpu_data;
-	unsigned int lsize, sets, ways;
-	unsigned int config;
-
-	config = read_cpucfg(LOONGARCH_CPUCFG17);
-	lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE);
-	sets = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS);
-	ways = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1;
-
-	c->icache.linesz = lsize;
-	c->icache.sets = sets;
-	c->icache.ways = ways;
-	icache_size = sets * ways * lsize;
-	c->icache.waysize = icache_size / c->icache.ways;
-
-	config = read_cpucfg(LOONGARCH_CPUCFG18);
-	lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE);
-	sets = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS);
-	ways = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1;
-
-	c->dcache.linesz = lsize;
-	c->dcache.sets = sets;
-	c->dcache.ways = ways;
-	dcache_size = sets * ways * lsize;
-	c->dcache.waysize = dcache_size / c->dcache.ways;
-
-	c->options |= LOONGARCH_CPU_PREFETCH;
-
-	pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
-		icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz);
-
-	pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
-		dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz);
-}
-
-static void probe_vcache(void)
-{
-	struct cpuinfo_loongarch *c = &current_cpu_data;
-	unsigned int lsize, sets, ways;
-	unsigned int config;
-
-	config = read_cpucfg(LOONGARCH_CPUCFG19);
-	lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE);
-	sets = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS);
-	ways = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1;
-
-	c->vcache.linesz = lsize;
-	c->vcache.sets = sets;
-	c->vcache.ways = ways;
-	vcache_size = lsize * sets * ways;
-	c->vcache.waysize = vcache_size / c->vcache.ways;
-
-	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
-		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
-}
-
-static void probe_scache(void)
-{
-	struct cpuinfo_loongarch *c = &current_cpu_data;
-	unsigned int lsize, sets, ways;
-	unsigned int config;
-
-	config = read_cpucfg(LOONGARCH_CPUCFG20);
-	lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE);
-	sets = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS);
-	ways = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1;
-
-	c->scache.linesz = lsize;
-	c->scache.sets = sets;
-	c->scache.ways = ways;
-	/* 4 cores. scaches are shared */
-	scache_size = lsize * sets * ways;
-	c->scache.waysize = scache_size / c->scache.ways;
-
-	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
-		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
-}
+static inline void __flush_cache_line_hit(int leaf, unsigned long addr)
+{
+	switch (leaf) {
+	case Cache_LEAF0:
+		cache_op(Hit_Writeback_Inv_LEAF0, addr);
+		break;
+	case Cache_LEAF1:
+		cache_op(Hit_Writeback_Inv_LEAF1, addr);
+		break;
+	case Cache_LEAF2:
+		cache_op(Hit_Writeback_Inv_LEAF2, addr);
+		break;
+	case Cache_LEAF3:
+		cache_op(Hit_Writeback_Inv_LEAF3, addr);
+		break;
+	case Cache_LEAF4:
+		cache_op(Hit_Writeback_Inv_LEAF4, addr);
+		break;
+	case Cache_LEAF5:
+		cache_op(Hit_Writeback_Inv_LEAF5, addr);
+		break;
+	default:
+		break;
+	}
+}
+
+static inline void __flush_cache_line_indexed(int leaf, unsigned long addr)
+{
+	switch (leaf) {
+	case Cache_LEAF0:
+		cache_op(Index_Writeback_Inv_LEAF0, addr);
+		break;
+	case Cache_LEAF1:
+		cache_op(Index_Writeback_Inv_LEAF1, addr);
+		break;
+	case Cache_LEAF2:
+		cache_op(Index_Writeback_Inv_LEAF2, addr);
+		break;
+	case Cache_LEAF3:
+		cache_op(Index_Writeback_Inv_LEAF3, addr);
+		break;
+	case Cache_LEAF4:
+		cache_op(Index_Writeback_Inv_LEAF4, addr);
+		break;
+	case Cache_LEAF5:
+		cache_op(Index_Writeback_Inv_LEAF5, addr);
+		break;
+	default:
+		break;
+	}
+}
+
+void flush_cache_line_hit(unsigned long addr)
+{
+	int leaf;
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	/* If last level cache is inclusive, no need to flush other caches. */
+	leaf = cache_present - 1;
+	if (cache_inclusive(cdesc + leaf)) {
+		__flush_cache_line_hit(leaf, addr);
+		return;
+	}
+
+	for (leaf = 0; leaf < cache_present; leaf++)
+		__flush_cache_line_hit(leaf, addr);
+}
+
+static void flush_cache_leaf(unsigned int leaf)
+{
+	u64 line;
+	int i, j, nr_nodes;
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;
+
+	nr_nodes = loongson_sysconf.nr_nodes;
+	if (cache_private(cdesc))
+		nr_nodes = 1;
+
+	line = CSR_DMW0_BASE;
+	do {
+		for (i = 0; i < cdesc->sets; i++) {
+			for (j = 0; j < cdesc->ways; j++) {
+				__flush_cache_line_indexed(leaf, line);
+				line++;
+			}
+			line -= cdesc->ways;
+			line += cdesc->linesz;
+		}
+		line += 0x100000000000;
+	} while (--nr_nodes > 0);
+}
+
+asmlinkage __visible void cpu_flush_caches(void)
+{
+	int leaf;
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	/* If last level cache is inclusive, no need to flush other caches. */
+	leaf = cache_present - 1;
+	if (cache_inclusive(cdesc + leaf)) {
+		flush_cache_leaf(leaf);
+		return;
+	}
+
+	for (leaf = 0; leaf < cache_present; leaf++)
+		flush_cache_leaf(leaf);
+}
+
+static inline void set_cache_basics(struct cache_desc *cdesc, unsigned int leaf)
+{
+	unsigned int config;
+
+	config = read_cpucfg(LOONGARCH_CPUCFG17 + leaf);
+	cdesc->linesz = 1 << ((config & CACHE_LSIZE_M) >> CACHE_LSIZE);
+	cdesc->sets = 1 << ((config & CACHE_SETS_M) >> CACHE_SETS);
+	cdesc->ways = ((config & CACHE_WAYS_M) >> CACHE_WAYS) + 1;
+}
+
+#define populate_cache_properties(config, cdesc, level, leaf)	\
+{								\
+	if (level == 1) {					\
+		cdesc->flags |= CACHE_PRIVATE;			\
+	} else {						\
+		if (config & IUPRIV)				\
+			cdesc->flags |= CACHE_PRIVATE;		\
+		if (config & IUINCL)				\
+			cdesc->flags |= CACHE_INCLUSIVE;	\
+	}							\
+	cdesc->flags |= CACHE_PRESENT;				\
+	cdesc->level = level;					\
+	set_cache_basics(cdesc, leaf);				\
+	cdesc++;						\
+	leaf++;							\
+}
+
+/*
+ * Each cache level occupies 7 bits in order in CPUCFG16,
+ * except level 1, which occupies bits 0~2.
+ */
+static void probe_cache_hierarchy(void)
+{
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+	unsigned int leaf = 0, level;
+	unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
+
+#define IUPRE		(1 << 0)
+#define IUUNIFY		(1 << 1)
+#define IUPRIV		(1 << 2)
+#define IUINCL		(1 << 3)
+#define DPRE		(1 << 4)
+#define DPRIV		(1 << 5)
+#define DINCL		(1 << 6)
+
+#define L1DPRE		(1 << 2)
+
+	for (level = 1; level <= CACHE_LEVEL_MAX; level++) {
+		if (config & IUPRE) {
+			if (config & IUUNIFY)
+				cdesc->type = CACHE_TYPE_UNIFIED;
+			else
+				cdesc->type = CACHE_TYPE_INST;
+
+			populate_cache_properties(config, cdesc, level, leaf);
+		}
+
+		if ((level == 1 && (config & L1DPRE)) ||
+		    (level != 1 && (config & DPRE))) {
+			cdesc->type = CACHE_TYPE_DATA;
+
+			populate_cache_properties(config, cdesc, level, leaf);
+		}
+
+		if (level == 1)
+			config = config >> 3;
+		else
+			config = config >> 7;
+
+		if (!config)
+			break;
+	}
+
+	if (leaf > 0)
+		current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
+
+	BUG_ON(leaf > CACHE_LEAVES_MAX);
+
+	current_cpu_data.cache_leaves_present = leaf;
+}
 
 void cpu_cache_init(void)
 {
-	probe_pcache();
-	probe_vcache();
-	probe_scache();
+	probe_cache_hierarchy();
 
 	shm_align_mask = PAGE_SIZE - 1;
 }
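`probe_cache_hierarchy()` consumes CPUCFG16 three or seven bits at a time: level 1 only has present bits for I and D (bits 0~2), while every other level carries a full 7-bit descriptor that is shifted out once the level has been parsed. The sketch below replays that walk on an invented `config` value (the bit packing follows the shifts in the patch; it is an illustration, not a dump from real hardware):

```c
#include <stdio.h>

/* CPUCFG16 field bits, copied from probe_cache_hierarchy(). */
#define IUPRE	(1 << 0)
#define IUUNIFY	(1 << 1)
#define IUPRIV	(1 << 2)
#define IUINCL	(1 << 3)
#define DPRE	(1 << 4)
#define L1DPRE	(1 << 2)

int main(void)
{
	/*
	 * Invented config: L1 I + D present (bits 0~2), then a private
	 * unified L2 (7 bits from bit 3) and an inclusive unified L3
	 * (7 bits from bit 10).
	 */
	unsigned int config = (IUPRE | L1DPRE)
			    | ((IUPRE | IUUNIFY | IUPRIV) << 3)
			    | ((IUPRE | IUUNIFY | IUINCL) << 10);
	unsigned int level, leaf = 0;

	for (level = 1; level <= 3; level++) {
		if (config & IUPRE)
			printf("level %u leaf %u: %s\n", level, leaf++,
			       (config & IUUNIFY) ? "unified" : "inst");
		if ((level == 1 && (config & L1DPRE)) ||
		    (level != 1 && (config & DPRE)))
			printf("level %u leaf %u: data\n", level, leaf++);
		config >>= (level == 1) ? 3 : 7;
		if (!config)
			break;
	}
	printf("%u leaves present\n", leaf);	/* 4 with this sample */
	return 0;
}
```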
diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c
@@ -10,6 +10,7 @@
 #include <linux/pci.h>
 #include <linux/vgaarb.h>
 #include <asm/loongson.h>
+#include <asm/cacheflush.h>
 
 #define PCI_DEVICE_ID_LOONGSON_HOST	0x7a00
 #define PCI_DEVICE_ID_LOONGSON_DC1	0x7a06
@@ -45,12 +46,10 @@ static int __init pcibios_init(void)
 	unsigned int lsize;
 
 	/*
-	 * Set PCI cacheline size to that of the highest level in the
+	 * Set PCI cacheline size to that of the last level in the
 	 * cache hierarchy.
 	 */
-	lsize = cpu_dcache_line_size();
-	lsize = cpu_vcache_line_size() ? : lsize;
-	lsize = cpu_scache_line_size() ? : lsize;
+	lsize = cpu_last_level_cache_line_size();
 
 	BUG_ON(!lsize);