commit eb8c6f75 authored by Liu Peibao, committed by Hongchen Zhang

LoongArch: convert p v s cache to l1 l2 l3 format

LoongArch inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I6BWFP

--------------------------------

Change-Id: I0d38cd552c26b92a587465d3b5fb5e992f67ff9b
Signed-off-by: Liu Peibao <liupeibao@loongson.cn>
parent 6def0ea3
diff --git a/arch/loongarch/include/asm/cacheflush.h b/arch/loongarch/include/asm/cacheflush.h
@@ -9,8 +9,11 @@
 #include <asm/cpu-features.h>
 #include <asm/cacheops.h>

-extern void local_flush_icache_range(unsigned long start, unsigned long end);
+void local_flush_icache_range(unsigned long start, unsigned long end);
+void flush_cache_line_hit(unsigned long addr);
+asmlinkage void cpu_flush_caches(void);
+
+#define invalid_cache_line_hit(addr)	flush_cache_line_hit(addr)

 #define flush_icache_range	local_flush_icache_range
 #define flush_icache_user_range	local_flush_icache_range
@@ -35,46 +38,26 @@ extern void local_flush_icache_range(unsigned long start, unsigned long end);
	: \
	: "i" (op), "ZC" (*(unsigned char *)(addr)))

-static inline void flush_icache_line_indexed(unsigned long addr)
+static inline bool cache_present(struct cache_desc *cdesc)
 {
-	cache_op(Index_Invalidate_I, addr);
+	return cdesc->flags & CACHE_PRESENT;
 }

-static inline void flush_dcache_line_indexed(unsigned long addr)
+static inline bool cache_private(struct cache_desc *cdesc)
 {
-	cache_op(Index_Writeback_Inv_D, addr);
+	return cdesc->flags & CACHE_PRIVATE;
 }

-static inline void flush_vcache_line_indexed(unsigned long addr)
+static inline bool cache_inclusive(struct cache_desc *cdesc)
 {
-	cache_op(Index_Writeback_Inv_V, addr);
+	return cdesc->flags & CACHE_INCLUSIVE;
 }

-static inline void flush_scache_line_indexed(unsigned long addr)
+static inline unsigned int cpu_last_level_cache_line_size(void)
 {
-	cache_op(Index_Writeback_Inv_S, addr);
-}
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;

-static inline void flush_icache_line(unsigned long addr)
-{
-	cache_op(Hit_Invalidate_I, addr);
+	return current_cpu_data.cache_leaves[cache_present - 1].linesz;
 }

-static inline void flush_dcache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_D, addr);
-}
-
-static inline void flush_vcache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_V, addr);
-}
-
-static inline void flush_scache_line(unsigned long addr)
-{
-	cache_op(Hit_Writeback_Inv_S, addr);
-}
-
 #include <asm-generic/cacheflush.h>

 #endif /* _ASM_CACHEFLUSH_H */
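For context, a minimal sketch (not part of the patch) of how the new flag helpers and cpu_last_level_cache_line_size() fit together. dump_cache_leaves() is a hypothetical debugging helper; it assumes probe_cache_hierarchy() (see the mm/cache.c hunk below) has already populated current_cpu_data:

/* Illustrative only: walk the probed leaves and print them. */
#include <linux/printk.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>

static void __init dump_cache_leaves(void)
{
	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
	unsigned int i, n = current_cpu_data.cache_leaves_present;

	for (i = 0; i < n; i++, cdesc++) {
		if (!cache_present(cdesc))
			continue;
		pr_info("L%d leaf %u: %d sets x %d ways x %dB%s%s\n",
			cdesc->level, i, cdesc->sets, cdesc->ways,
			cdesc->linesz,
			cache_private(cdesc) ? ", private" : ", shared",
			cache_inclusive(cdesc) ? ", inclusive" : "");
	}

	pr_info("last level cache line size: %u\n",
		cpu_last_level_cache_line_size());
}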
diff --git a/arch/loongarch/include/asm/cacheops.h b/arch/loongarch/include/asm/cacheops.h
@@ -8,16 +8,18 @@
 #define __ASM_CACHEOPS_H

 /*
- * Most cache ops are split into a 2 bit field identifying the cache, and a 3
+ * Most cache ops are split into a 3 bit field identifying the cache, and a 2
  * bit field identifying the cache operation.
  */
-#define CacheOp_Cache			0x03
-#define CacheOp_Op			0x1c
+#define CacheOp_Cache			0x07
+#define CacheOp_Op			0x18

-#define Cache_I				0x00
-#define Cache_D				0x01
-#define Cache_V				0x02
-#define Cache_S				0x03
+#define Cache_LEAF0			0x00
+#define Cache_LEAF1			0x01
+#define Cache_LEAF2			0x02
+#define Cache_LEAF3			0x03
+#define Cache_LEAF4			0x04
+#define Cache_LEAF5			0x05

 #define Index_Invalidate		0x08
 #define Index_Writeback_Inv		0x08
@@ -25,13 +27,17 @@
 #define Hit_Writeback_Inv		0x10
 #define CacheOp_User_Defined		0x18

-#define Index_Invalidate_I		(Cache_I | Index_Invalidate)
-#define Index_Writeback_Inv_D		(Cache_D | Index_Writeback_Inv)
-#define Index_Writeback_Inv_V		(Cache_V | Index_Writeback_Inv)
-#define Index_Writeback_Inv_S		(Cache_S | Index_Writeback_Inv)
-#define Hit_Invalidate_I		(Cache_I | Hit_Invalidate)
-#define Hit_Writeback_Inv_D		(Cache_D | Hit_Writeback_Inv)
-#define Hit_Writeback_Inv_V		(Cache_V | Hit_Writeback_Inv)
-#define Hit_Writeback_Inv_S		(Cache_S | Hit_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF0	(Cache_LEAF0 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF1	(Cache_LEAF1 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF2	(Cache_LEAF2 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF3	(Cache_LEAF3 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF4	(Cache_LEAF4 | Index_Writeback_Inv)
+#define Index_Writeback_Inv_LEAF5	(Cache_LEAF5 | Index_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF0		(Cache_LEAF0 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF1		(Cache_LEAF1 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF2		(Cache_LEAF2 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF3		(Cache_LEAF3 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF4		(Cache_LEAF4 | Hit_Writeback_Inv)
+#define Hit_Writeback_Inv_LEAF5		(Cache_LEAF5 | Hit_Writeback_Inv)

 #endif /* __ASM_CACHEOPS_H */
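A worked example of the new encoding (user-space sketch, not part of the patch; macro values copied from the hunk above): the cache leaf index lives in bits 2:0 (CacheOp_Cache) and the operation in bits 4:3 (CacheOp_Op), so Hit_Writeback_Inv_LEAF2 is 0x02 | 0x10 = 0x12:

#include <stdio.h>

#define CacheOp_Cache 0x07
#define CacheOp_Op    0x18

int main(void)
{
	unsigned int op = 0x02 | 0x10;	/* Hit_Writeback_Inv_LEAF2 = 0x12 */

	printf("leaf = %u, op bits = 0x%02x\n",
	       op & CacheOp_Cache,	/* prints 2 */
	       op & CacheOp_Op);	/* prints 0x10 */
	return 0;
}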
diff --git a/arch/loongarch/include/asm/cpu-features.h b/arch/loongarch/include/asm/cpu-features.h
@@ -19,11 +19,6 @@
 #define cpu_has_loongarch32	(cpu_data[0].isa_level & LOONGARCH_CPU_ISA_32BIT)
 #define cpu_has_loongarch64	(cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)

-#define cpu_icache_line_size()	cpu_data[0].icache.linesz
-#define cpu_dcache_line_size()	cpu_data[0].dcache.linesz
-#define cpu_vcache_line_size()	cpu_data[0].vcache.linesz
-#define cpu_scache_line_size()	cpu_data[0].scache.linesz
-
 #ifdef CONFIG_32BIT
 # define cpu_has_64bits		(cpu_data[0].isa_level & LOONGARCH_CPU_ISA_64BIT)
 # define cpu_vabits		31
diff --git a/arch/loongarch/include/asm/cpu-info.h b/arch/loongarch/include/asm/cpu-info.h
@@ -9,19 +9,28 @@
 #include <linux/types.h>

 #include <asm/loongarch.h>

+/* cache_desc->flags */
+enum {
+	CACHE_PRESENT	= (1 << 0),
+	CACHE_PRIVATE	= (1 << 1),	/* core private cache */
+	CACHE_INCLUSIVE	= (1 << 2),	/* include the lower level caches */
+};
+
 /*
  * Descriptor for a cache
  */
 struct cache_desc {
-	unsigned int waysize;	/* Bytes per way */
+	unsigned char type;
+	unsigned char level;
 	unsigned short sets;	/* Number of lines per set */
 	unsigned char ways;	/* Number of ways */
 	unsigned char linesz;	/* Size of line in bytes */
-	unsigned char waybit;	/* Bits to select in a cache set */
 	unsigned char flags;	/* Flags describing cache properties */
 };

+#define CACHE_LEAVES_MAX	6
+#define CACHE_LEVEL_MAX		3
+
 struct cpuinfo_loongarch {
	u64 asid_cache;
	unsigned long asid_mask;
@@ -40,11 +49,8 @@ struct cpuinfo_loongarch {
	int tlbsizemtlb;
	int tlbsizestlbsets;
	int tlbsizestlbways;
-	struct cache_desc icache;	/* Primary I-cache */
-	struct cache_desc dcache;	/* Primary D or combined I/D cache */
-	struct cache_desc vcache;	/* Victim cache, between pcache and scache */
-	struct cache_desc scache;	/* Secondary cache */
-	struct cache_desc tcache;	/* Tertiary/split secondary cache */
+	unsigned int cache_leaves_present;	/* number of cache_leaves[] elements */
+	struct cache_desc cache_leaves[CACHE_LEAVES_MAX];
	int core;   /* physical core number in package */
	int package;/* physical package number */
	int vabits; /* Virtual Address size in bits */
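A worked example (not in the patch) of the leaves-versus-levels distinction this array introduces, which init_cache_level() below relies on: a hierarchy of L1I + L1D + L2 + L3 has four leaves but only three levels, and because the leaves are stored in level order, the level count can be read off the last leaf:

#include <stdio.h>

struct cache_desc { unsigned char level; };

int main(void)
{
	/* A typical split-L1 hierarchy: L1I, L1D, L2, L3 = 4 leaves. */
	struct cache_desc leaves[] = { {1}, {1}, {2}, {3} };
	unsigned int present = sizeof(leaves) / sizeof(leaves[0]);

	/* Same logic as init_cache_level(): levels from the last leaf. */
	printf("num_leaves = %u, num_levels = %d\n",
	       present, leaves[present - 1].level);	/* 4, 3 */
	return 0;
}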
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
@@ -224,6 +224,13 @@ static inline u32 read_cpucfg(u32 reg)
 #define  CPUCFG48_VFPU_CG	BIT(2)
 #define  CPUCFG48_RAM_CG	BIT(3)

+#define CACHE_WAYS_M	GENMASK(15, 0)
+#define CACHE_SETS_M	GENMASK(23, 16)
+#define CACHE_LSIZE_M	GENMASK(30, 24)
+
+#define CACHE_WAYS	0
+#define CACHE_SETS	16
+#define CACHE_LSIZE	24
+
 #ifndef __ASSEMBLY__

 /* CSR */
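A worked decode of this layout (user-space sketch with an illustrative register value, not from the patch): per set_cache_basics() in the mm/cache.c hunk below, a per-leaf config word holds ways-1 in bits 15:0, log2(sets) in bits 23:16 and log2(line size) in bits 30:24:

#include <stdio.h>

#define CACHE_WAYS_M  0x0000ffffu	/* GENMASK(15, 0) */
#define CACHE_SETS_M  0x00ff0000u	/* GENMASK(23, 16) */
#define CACHE_LSIZE_M 0x7f000000u	/* GENMASK(30, 24) */
#define CACHE_WAYS    0
#define CACHE_SETS    16
#define CACHE_LSIZE   24

int main(void)
{
	/* Hypothetical config: 64B lines (2^6), 256 sets (2^8), 4 ways. */
	unsigned int config = (6u << CACHE_LSIZE) | (8u << CACHE_SETS) | 3u;
	unsigned int lsize = 1u << ((config & CACHE_LSIZE_M) >> CACHE_LSIZE);
	unsigned int sets  = 1u << ((config & CACHE_SETS_M) >> CACHE_SETS);
	unsigned int ways  = ((config & CACHE_WAYS_M) >> CACHE_WAYS) + 1;

	printf("%uB x %u sets x %u ways = %u KiB\n",
	       lsize, sets, ways, lsize * sets * ways / 1024);	/* 64 KiB */
	return 0;
}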
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
@@ -5,69 +5,28 @@
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  */
 #include <linux/cacheinfo.h>
+#include <linux/of.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu-info.h>

-/* Populates leaf and increments to next leaf */
-#define populate_cache(cache, leaf, c_level, c_type)		\
-do {								\
-	leaf->type = c_type;					\
-	leaf->level = c_level;					\
-	leaf->coherency_line_size = c->cache.linesz;		\
-	leaf->number_of_sets = c->cache.sets;			\
-	leaf->ways_of_associativity = c->cache.ways;		\
-	leaf->size = c->cache.linesz * c->cache.sets *		\
-		c->cache.ways;					\
-	if (leaf->level > 2)					\
-		leaf->size *= nodes_per_package;		\
-	leaf++;							\
-} while (0)
-
 int init_cache_level(unsigned int cpu)
 {
-	struct cpuinfo_loongarch *c = &current_cpu_data;
 	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-	int levels = 0, leaves = 0;
-
-	/*
-	 * If Dcache is not set, we assume the cache structures
-	 * are not properly initialized.
-	 */
-	if (c->dcache.waysize)
-		levels += 1;
-	else
-		return -ENOENT;
-
-	leaves += (c->icache.waysize) ? 2 : 1;
-
-	if (c->vcache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	if (c->scache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	if (c->tcache.waysize) {
-		levels++;
-		leaves++;
-	}
-
-	this_cpu_ci->num_levels = levels;
-	this_cpu_ci->num_leaves = leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	this_cpu_ci->num_levels =
+		current_cpu_data.cache_leaves[cache_present - 1].level;
+	this_cpu_ci->num_leaves = cache_present;
	return 0;
 }

 static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					    struct cacheinfo *sib_leaf)
 {
-	return !((this_leaf->level == 1) || (this_leaf->level == 2));
+	return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE) &&
+		!(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
 }

-static void cache_cpumap_setup(unsigned int cpu)
+static void __cache_cpumap_setup(unsigned int cpu)
 {
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
@@ -85,8 +44,11 @@ static void cache_cpumap_setup(unsigned int cpu)
	for_each_online_cpu(i) {
		struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

-		if (i == cpu || !sib_cpu_ci->info_list)
-			continue;/* skip if itself or no cacheinfo */
+		/*
+		 * Skip if itself, if there is no cacheinfo, or if the
+		 * CPUs are not in the same physical node.
+		 */
+		if (i == cpu || !sib_cpu_ci->info_list ||
+		    (cpu_to_node(i) != cpu_to_node(cpu)))
+			continue;
		sib_leaf = sib_cpu_ci->info_list + index;
		if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
			cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
@@ -98,33 +60,30 @@ static void cache_cpumap_setup(unsigned int cpu)

 int populate_cache_leaves(unsigned int cpu)
 {
-	int level = 1, nodes_per_package = 1;
-	struct cpuinfo_loongarch *c = &current_cpu_data;
+	struct cache_desc *cdesc_tmp, *cdesc = current_cpu_data.cache_leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+	int i;

-	if (loongson_sysconf.nr_nodes > 1)
-		nodes_per_package = loongson_sysconf.cores_per_package
-			/ loongson_sysconf.cores_per_node;
-
-	if (c->icache.waysize) {
-		populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
-		populate_cache(icache, this_leaf, level++, CACHE_TYPE_INST);
-	} else {
-		populate_cache(dcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
+	for (i = 0; i < cache_present; i++) {
+		cdesc_tmp = cdesc + i;
+		this_leaf->type = cdesc_tmp->type;
+		this_leaf->level = cdesc_tmp->level;
+		this_leaf->coherency_line_size = cdesc_tmp->linesz;
+		this_leaf->number_of_sets = cdesc_tmp->sets;
+		this_leaf->ways_of_associativity = cdesc_tmp->ways;
+		this_leaf->size =
+			cdesc_tmp->linesz * cdesc_tmp->sets * cdesc_tmp->ways;
+		this_leaf->priv = &cdesc_tmp->flags;
+		this_leaf++;
	}

-	if (c->vcache.waysize)
-		populate_cache(vcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	if (c->scache.waysize)
-		populate_cache(scache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	if (c->tcache.waysize)
-		populate_cache(tcache, this_leaf, level++, CACHE_TYPE_UNIFIED);
-
-	cache_cpumap_setup(cpu);
+	if (!of_have_populated_dt()) {
+		__cache_cpumap_setup(cpu);
+		this_cpu_ci->cpu_map_populated = true;
+	}

	return 0;
 }
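To make the new sharing rule concrete, a small user-space sketch (illustrative only; leaves_shared() is a hypothetical stand-in for cache_leaves_are_shared(), which additionally requires both CPUs to be in the same node):

#include <stdio.h>
#include <stdbool.h>

#define CACHE_PRIVATE (1 << 1)

/* A leaf is shared between two same-node CPUs iff neither side
 * marks it core-private. */
static bool leaves_shared(unsigned char a_flags, unsigned char b_flags)
{
	return !(a_flags & CACHE_PRIVATE) && !(b_flags & CACHE_PRIVATE);
}

int main(void)
{
	printf("private L1 shared? %d\n",
	       leaves_shared(CACHE_PRIVATE, CACHE_PRIVATE));	/* 0 */
	printf("shared  L3 shared? %d\n", leaves_shared(0, 0));	/* 1 */
	return 0;
}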
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  *
  * Derived from MIPS:
  * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2007 MIPS Technologies, Inc.
  */
 #include <linux/export.h>
 #include <linux/fcntl.h>
 #include <linux/fs.h>
@@ -15,127 +15,225 @@
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/syscalls.h>
+#include <linux/cacheinfo.h>

 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/bootinfo.h>
 #include <asm/cpu-features.h>
 #include <asm/dma.h>
 #include <asm/loongarch.h>
 #include <asm/processor.h>
 #include <asm/setup.h>

+extern struct loongson_system_configuration loongson_sysconf;
+
+void cache_error_setup(void)
+{
+	extern char __weak except_vec_cex;
+	set_merr_handler(0x0, &except_vec_cex, 0x80);
+}
+
-/*
- * LoongArch maintains ICache/DCache coherency by hardware,
- * we just need "ibar" to avoid instruction hazard here.
- */
+/* Cache operations. */
 void local_flush_icache_range(unsigned long start, unsigned long end)
 {
	asm volatile ("\tibar 0\n"::);
 }
 EXPORT_SYMBOL(local_flush_icache_range);

-void cache_error_setup(void)
+static inline void __flush_cache_line_hit(int leaf, unsigned long addr)
 {
-	extern char __weak except_vec_cex;
-	set_merr_handler(0x0, &except_vec_cex, 0x80);
+	switch (leaf) {
+	case Cache_LEAF0:
+		cache_op(Hit_Writeback_Inv_LEAF0, addr);
+		break;
+	case Cache_LEAF1:
+		cache_op(Hit_Writeback_Inv_LEAF1, addr);
+		break;
+	case Cache_LEAF2:
+		cache_op(Hit_Writeback_Inv_LEAF2, addr);
+		break;
+	case Cache_LEAF3:
+		cache_op(Hit_Writeback_Inv_LEAF3, addr);
+		break;
+	case Cache_LEAF4:
+		cache_op(Hit_Writeback_Inv_LEAF4, addr);
+		break;
+	case Cache_LEAF5:
+		cache_op(Hit_Writeback_Inv_LEAF5, addr);
+		break;
+	default:
+		break;
+	}
 }

-static unsigned long icache_size __read_mostly;
-static unsigned long dcache_size __read_mostly;
-static unsigned long vcache_size __read_mostly;
-static unsigned long scache_size __read_mostly;
+static inline void __flush_cache_line_indexed(int leaf, unsigned long addr)
+{
+	switch (leaf) {
+	case Cache_LEAF0:
+		cache_op(Index_Writeback_Inv_LEAF0, addr);
+		break;
+	case Cache_LEAF1:
+		cache_op(Index_Writeback_Inv_LEAF1, addr);
+		break;
+	case Cache_LEAF2:
+		cache_op(Index_Writeback_Inv_LEAF2, addr);
+		break;
+	case Cache_LEAF3:
+		cache_op(Index_Writeback_Inv_LEAF3, addr);
+		break;
+	case Cache_LEAF4:
+		cache_op(Index_Writeback_Inv_LEAF4, addr);
+		break;
+	case Cache_LEAF5:
+		cache_op(Index_Writeback_Inv_LEAF5, addr);
+		break;
+	default:
+		break;
+	}
+}

-static char *way_string[] = { NULL, "direct mapped", "2-way",
-	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
-	"9-way", "10-way", "11-way", "12-way",
-	"13-way", "14-way", "15-way", "16-way",
-};
+void flush_cache_line_hit(unsigned long addr)
+{
+	int leaf;
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	/* If last level cache is inclusive, no need to flush other caches. */
+	leaf = cache_present - 1;
+	if (cache_inclusive(cdesc + leaf)) {
+		__flush_cache_line_hit(leaf, addr);
+		return;
+	}
+
+	for (leaf = 0; leaf < cache_present; leaf++)
+		__flush_cache_line_hit(leaf, addr);
+}

-static void probe_pcache(void)
+static void flush_cache_leaf(unsigned int leaf)
 {
+	u64 line;
+	int i, j, nr_nodes;
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;
+
+	nr_nodes = loongson_sysconf.nr_nodes;
+	if (cache_private(cdesc))
+		nr_nodes = 1;
+
+	line = CSR_DMW0_BASE;
+	do {
+		for (i = 0; i < cdesc->sets; i++) {
+			for (j = 0; j < cdesc->ways; j++) {
+				__flush_cache_line_indexed(leaf, line);
+				line++;
+			}
+			line -= cdesc->ways;
+			line += cdesc->linesz;
+		}
+		line += 0x100000000000;
+	} while (--nr_nodes > 0);
+}
+
+asmlinkage __visible void cpu_flush_caches(void)
+{
+	int leaf;
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+	unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
+	/* If last level cache is inclusive, no need to flush other caches. */
+	leaf = cache_present - 1;
+	if (cache_inclusive(cdesc + leaf)) {
+		flush_cache_leaf(leaf);
+		return;
+	}
+
+	for (leaf = 0; leaf < cache_present; leaf++)
+		flush_cache_leaf(leaf);
+}
+
+static inline void set_cache_basics(struct cache_desc *cdesc, unsigned int leaf)
+{
-	struct cpuinfo_loongarch *c = &current_cpu_data;
-	unsigned int lsize, sets, ways;
	unsigned int config;

-	config = read_cpucfg(LOONGARCH_CPUCFG17);
-	lsize = 1 << ((config & CPUCFG17_L1I_SIZE_M) >> CPUCFG17_L1I_SIZE);
-	sets = 1 << ((config & CPUCFG17_L1I_SETS_M) >> CPUCFG17_L1I_SETS);
-	ways = ((config & CPUCFG17_L1I_WAYS_M) >> CPUCFG17_L1I_WAYS) + 1;
+	config = read_cpucfg(LOONGARCH_CPUCFG17 + leaf);
+	cdesc->linesz = 1 << ((config & CACHE_LSIZE_M) >> CACHE_LSIZE);
+	cdesc->sets = 1 << ((config & CACHE_SETS_M) >> CACHE_SETS);
+	cdesc->ways = ((config & CACHE_WAYS_M) >> CACHE_WAYS) + 1;
+}

-	c->icache.linesz = lsize;
-	c->icache.sets = sets;
-	c->icache.ways = ways;
-	icache_size = sets * ways * lsize;
-	c->icache.waysize = icache_size / c->icache.ways;
+#define populate_cache_properties(config, cdesc, level, leaf)	\
+{								\
+	if (level == 1) {					\
+		cdesc->flags |= CACHE_PRIVATE;			\
+	} else {						\
+		if (config & IUPRIV)				\
+			cdesc->flags |= CACHE_PRIVATE;		\
+		if (config & IUINCL)				\
+			cdesc->flags |= CACHE_INCLUSIVE;	\
+	}							\
+	cdesc->flags |= CACHE_PRESENT;				\
+	cdesc->level = level;					\
+	set_cache_basics(cdesc, leaf);				\
+	cdesc++;						\
+	leaf++;							\
+}

-	config = read_cpucfg(LOONGARCH_CPUCFG18);
-	lsize = 1 << ((config & CPUCFG18_L1D_SIZE_M) >> CPUCFG18_L1D_SIZE);
-	sets = 1 << ((config & CPUCFG18_L1D_SETS_M) >> CPUCFG18_L1D_SETS);
-	ways = ((config & CPUCFG18_L1D_WAYS_M) >> CPUCFG18_L1D_WAYS) + 1;
+/*
+ * Each cache level occupies 7 bits in CPUCFG16, in order,
+ * except level 1, which occupies only bits 0~2.
+ */
+static void probe_cache_hierarchy(void)
+{
+	struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+	unsigned int leaf = 0, level;
+	unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);

-	c->dcache.linesz = lsize;
-	c->dcache.sets = sets;
-	c->dcache.ways = ways;
-	dcache_size = sets * ways * lsize;
-	c->dcache.waysize = dcache_size / c->dcache.ways;
+#define IUPRE	(1 << 0)
+#define IUUNIFY	(1 << 1)
+#define IUPRIV	(1 << 2)
+#define IUINCL	(1 << 3)
+#define DPRE	(1 << 4)
+#define DPRIV	(1 << 5)
+#define DINCL	(1 << 6)

-	c->options |= LOONGARCH_CPU_PREFETCH;
+#define L1DPRE	(1 << 2)

-	pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
-		icache_size >> 10, way_string[c->icache.ways], "VIPT", c->icache.linesz);
+	for (level = 1; level <= CACHE_LEVEL_MAX; level++) {
+		if (config & IUPRE) {
+			if (config & IUUNIFY)
+				cdesc->type = CACHE_TYPE_UNIFIED;
+			else
+				cdesc->type = CACHE_TYPE_INST;

-	pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
-		dcache_size >> 10, way_string[c->dcache.ways], "VIPT", "no aliases", c->dcache.linesz);
-}
+			populate_cache_properties(config, cdesc, level, leaf);
+		}

-static void probe_vcache(void)
-{
-	struct cpuinfo_loongarch *c = &current_cpu_data;
-	unsigned int lsize, sets, ways;
-	unsigned int config;
+		if ((level == 1 && (config & L1DPRE)) ||
+		    (level != 1 && (config & DPRE))) {
+			cdesc->type = CACHE_TYPE_DATA;

-	config = read_cpucfg(LOONGARCH_CPUCFG19);
-	lsize = 1 << ((config & CPUCFG19_L2_SIZE_M) >> CPUCFG19_L2_SIZE);
-	sets = 1 << ((config & CPUCFG19_L2_SETS_M) >> CPUCFG19_L2_SETS);
-	ways = ((config & CPUCFG19_L2_WAYS_M) >> CPUCFG19_L2_WAYS) + 1;
+			populate_cache_properties(config, cdesc, level, leaf);
+		}

-	c->vcache.linesz = lsize;
-	c->vcache.sets = sets;
-	c->vcache.ways = ways;
-	vcache_size = lsize * sets * ways;
-	c->vcache.waysize = vcache_size / c->vcache.ways;
+		if (level == 1)
+			config = config >> 3;
+		else
+			config = config >> 7;

-	pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
-		vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
-}
+		if (!config)
+			break;
+	}

-static void probe_scache(void)
-{
-	struct cpuinfo_loongarch *c = &current_cpu_data;
-	unsigned int lsize, sets, ways;
-	unsigned int config;
+	if (leaf > 0)
+		current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;

-	config = read_cpucfg(LOONGARCH_CPUCFG20);
-	lsize = 1 << ((config & CPUCFG20_L3_SIZE_M) >> CPUCFG20_L3_SIZE);
-	sets = 1 << ((config & CPUCFG20_L3_SETS_M) >> CPUCFG20_L3_SETS);
-	ways = ((config & CPUCFG20_L3_WAYS_M) >> CPUCFG20_L3_WAYS) + 1;
+	BUG_ON(leaf > CACHE_LEAVES_MAX);

-	c->scache.linesz = lsize;
-	c->scache.sets = sets;
-	c->scache.ways = ways;
-	/* 4 cores. scaches are shared */
-	scache_size = lsize * sets * ways;
-	c->scache.waysize = scache_size / c->scache.ways;
+	current_cpu_data.cache_leaves_present = leaf;
-
-	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
-		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
 }

 void cpu_cache_init(void)
 {
-	probe_pcache();
-	probe_vcache();
-	probe_scache();
+	probe_cache_hierarchy();

	shm_align_mask = PAGE_SIZE - 1;
 }
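The nested loop in flush_cache_leaf() encodes the target way in the low address bits and steps the set index by one line size, since index-type cacop operations select a line by (way, set) rather than by physical address. A user-space sketch of just the address walk (illustrative only; the base value stands in for CSR_DMW0_BASE, and the real code additionally strides nodes by 0x100000000000):

#include <stdio.h>

int main(void)
{
	/* Hypothetical DMW-style direct-map window base. */
	unsigned long long line = 0x8000000000000000ULL;
	unsigned int sets = 4, ways = 2, linesz = 64;

	/* Same walk as flush_cache_leaf(): way index in the low bits,
	 * set index advanced by one line size per set. */
	for (unsigned int i = 0; i < sets; i++) {
		for (unsigned int j = 0; j < ways; j++) {
			printf("set %u way %u -> 0x%llx\n", i, j, line);
			line++;
		}
		line -= ways;
		line += linesz;
	}
	return 0;
}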
diff --git a/arch/loongarch/pci/pci.c b/arch/loongarch/pci/pci.c
@@ -10,6 +10,7 @@
 #include <linux/pci.h>
 #include <linux/vgaarb.h>
 #include <asm/loongson.h>
+#include <asm/cacheflush.h>

 #define PCI_DEVICE_ID_LOONGSON_HOST	0x7a00
 #define PCI_DEVICE_ID_LOONGSON_DC1	0x7a06
@@ -45,12 +46,10 @@ static int __init pcibios_init(void)
	unsigned int lsize;

	/*
-	 * Set PCI cacheline size to that of the highest level in the
+	 * Set PCI cacheline size to that of the last level in the
	 * cache hierarchy.
	 */
-	lsize = cpu_dcache_line_size();
-	lsize = cpu_vcache_line_size() ? : lsize;
-	lsize = cpu_scache_line_size() ? : lsize;
+	lsize = cpu_last_level_cache_line_size();

	BUG_ON(!lsize);
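The deleted fallback chain relied on the GNU `x ? : y` conditional, which evaluates to x when x is non-zero and to y otherwise; that is how the outermost cache level that actually existed won out. A small sketch of that idiom (compile with gcc; maybe_zero() is a hypothetical stand-in for the removed cpu_*cache_line_size() helpers):

#include <stdio.h>

static unsigned int maybe_zero(unsigned int v) { return v; }

int main(void)
{
	unsigned int lsize = 32;

	/* GNU extension: "x ? : y" is x if x is nonzero, else y. */
	lsize = maybe_zero(0) ? : lsize;	/* stays 32 */
	lsize = maybe_zero(64) ? : lsize;	/* becomes 64 */

	printf("lsize = %u\n", lsize);
	return 0;
}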