Commit d8e5f554 authored by Minkyu Kang, committed by Tom Rix

s5pc1xx: update cache routines

Because v7_flush_dcache_all has been moved to omap3/cache.S
and s5pc110 needs cache routines, update the s5pc1xx cache routines.

l2_cache_enable and l2_cache_disable are moved from cache.c to cache.S,
and invalidate_dcache is modified to be SoC-specific.
Signed-off-by: Minkyu Kang <mk7.kang@samsung.com>
Parent 17ef9104
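The commit only adds and declares the routines; no caller is changed in the diff below. As a rough sketch of the intended use, a board or SoC init path could look like the following C fragment. This is an assumption for illustration only: the cache_init() wrapper and the include path are hypothetical, and only the prototypes added to sys_proto.h and the 0xC100 check in cache.S come from the commit itself.

#include <asm/arch/sys_proto.h>

/* Hypothetical caller, not part of this commit. */
void cache_init(void)
{
	/*
	 * invalidate_dcache() takes the device type so it can skip the
	 * set/way walk on S5PC100 (compared against 0xC100 in cache.S).
	 */
	invalidate_dcache(get_device_type());

	/* L2 enable/disable now lives in the assembly helpers. */
	l2_cache_enable();
}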
@@ -28,9 +28,9 @@ include $(TOPDIR)/config.mk
 LIB	= $(obj)lib$(SOC).a
-SOBJS	= reset.o
+SOBJS	= cache.o
+SOBJS	+= reset.o
-COBJS	+= cache.o
 COBJS	+= clock.o
 COBJS	+= cpu_info.o
 COBJS	+= timer.o
/*
* Copyright (C) 2009 Samsung Electronics
* Minkyu Kang <mk7.kang@samsung.com>
*
* based on cpu/arm_cortexa8/omap3/cache.S
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <asm/arch/cpu.h>
.align 5
.global invalidate_dcache
.global l2_cache_enable
.global l2_cache_disable
/*
* invalidate_dcache()
* Invalidate the whole D-cache.
*
* Corrupted registers: r0-r5, r7, r9-r11
*/
invalidate_dcache:
stmfd r13!, {r0 - r5, r7, r9 - r12, r14}
cmp r0, #0xC100 @ check if the cpu is s5pc100
beq finished_inval @ s5pc100 doesn't need this
@ routine
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
beq finished_inval @ if loc is 0, then no need to
@ clean
mov r10, #0 @ start clean at cache level 0
inval_loop1:
add r2, r10, r10, lsr #1 @ work out 3x current cache
@ level
mov r1, r0, lsr r2 @ extract cache type bits from
@ clidr
and r1, r1, #7 @ mask of the bits for current
@ cache only
cmp r1, #2 @ see what cache we have at
@ this level
blt skip_inval @ skip if no cache, or just
@ i-cache
mcr p15, 2, r10, c0, c0, 0 @ select current cache level
@ in cssr
mov r2, #0 @ operand for mcr SBZ
mcr p15, 0, r2, c7, c5, 4 @ flush prefetch buffer to
@ sych the new cssr&csidr,
@ with armv7 this is 'isb',
@ but we compile with armv5
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
and r2, r1, #7 @ extract the length of the
@ cache lines
add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 @ find maximum number on the
@ way size
clz r5, r4 @ find bit position of way
@ size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 @ extract max number of the
@ index size
inval_loop2:
mov r9, r4 @ create working copy of max
@ way size
inval_loop3:
orr r11, r10, r9, lsl r5 @ factor way and cache number
@ into r11
orr r11, r11, r7, lsl r2 @ factor index number into r11
mcr p15, 0, r11, c7, c6, 2 @ invalidate by set/way
subs r9, r9, #1 @ decrement the way
bge inval_loop3
subs r7, r7, #1 @ decrement the index
bge inval_loop2
skip_inval:
add r10, r10, #2 @ increment cache number
cmp r3, r10
bgt inval_loop1
finished_inval:
mov r10, #0 @ swith back to cache level 0
mcr p15, 2, r10, c0, c0, 0 @ select current cache level
@ in cssr
mcr p15, 0, r10, c7, c5, 4 @ flush prefetch buffer,
@ with armv7 this is 'isb',
@ but we compile with armv5
ldmfd r13!, {r0 - r5, r7, r9 - r12, pc}
l2_cache_enable:
push {r0, r1, r2, lr}
mrc 15, 0, r3, cr1, cr0, 1
orr r3, r3, #2
mcr 15, 0, r3, cr1, cr0, 1
pop {r1, r2, r3, pc}
l2_cache_disable:
push {r0, r1, r2, lr}
mrc 15, 0, r3, cr1, cr0, 1
bic r3, r3, #2
mcr 15, 0, r3, cr1, cr0, 1
pop {r1, r2, r3, pc}
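Leaving aside the S5PC100 early return, invalidate_dcache above is the generic ARMv7 set/way walk: read CLIDR to find the level of coherency, select each data or unified cache level in CSSELR, read its geometry from CCSIDR, and issue a DCISW operation for every set/way combination. The C rendering below is a sketch for clarification only; the real routine is typically kept in assembly so it can run before the C environment is usable, and the CP15 accesses here simply mirror the mrc/mcr encodings used in cache.S.

/* Sketch of the set/way walk performed by invalidate_dcache above. */
void invalidate_dcache_sketch(void)
{
	unsigned int clidr, loc, level;

	/* CLIDR: which cache levels exist (mrc p15, 1, c0, c0, 1) */
	__asm__ volatile ("mrc p15, 1, %0, c0, c0, 1" : "=r" (clidr));
	loc = (clidr >> 24) & 0x7;			/* level of coherency */

	for (level = 0; level < loc; level++) {
		unsigned int ctype = (clidr >> (level * 3)) & 0x7;
		unsigned int ccsidr, line_shift, way_shift, ways, sets, way, set;

		if (ctype < 2)				/* no cache or I-cache only */
			continue;

		/* select the level in CSSELR, sync, then read CCSIDR */
		__asm__ volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r" (level << 1));
		__asm__ volatile ("mcr p15, 0, %0, c7, c5, 4" : : "r" (0));
		__asm__ volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));

		line_shift = (ccsidr & 0x7) + 4;	/* log2(line size in bytes) */
		ways = (ccsidr >> 3) & 0x3ff;		/* maximum way index */
		sets = (ccsidr >> 13) & 0x7fff;		/* maximum set index */
		way_shift = ways ? __builtin_clz(ways) : 0;

		for (set = 0; set <= sets; set++)
			for (way = 0; way <= ways; way++) {
				unsigned int sw = (way << way_shift) |
						  (set << line_shift) |
						  (level << 1);
				/* DCISW: invalidate data cache by set/way */
				__asm__ volatile ("mcr p15, 0, %0, c7, c6, 2"
						  : : "r" (sw));
			}
	}
}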
 /*
- * Copyright (C) 2009 Samsung Electronics
+ * Copyright (C) 2009 Samsung Electrnoics
  * Minkyu Kang <mk7.kang@samsung.com>
  *
  * See file CREDITS for list of people who contributed to this
@@ -21,23 +21,12 @@
  * MA 02111-1307 USA
  */
-#include <common.h>
-#include <asm/cache.h>
+#ifndef _SYS_PROTO_H_
+#define _SYS_PROTO_H_
-void l2_cache_enable(void)
-{
-	unsigned long i;
+u32 get_device_type(void);
+void invalidate_dcache(u32);
+void l2_cache_disable(void);
+void l2_cache_enable(void);
-	__asm__ __volatile__("mrc p15, 0, %0, c1, c0, 1":"=r"(i));
-	__asm__ __volatile__("orr %0, %0, #0x2":"=r"(i));
-	__asm__ __volatile__("mcr p15, 0, %0, c1, c0, 1":"=r"(i));
-}
-void l2_cache_disable(void)
-{
-	unsigned long i;
-	__asm__ __volatile__("mrc p15, 0, %0, c1, c0, 1":"=r"(i));
-	__asm__ __volatile__("bic %0, %0, #0x2":"=r"(i));
-	__asm__ __volatile__("mcr p15, 0, %0, c1, c0, 1":"=r"(i));
-}
+#endif
@@ -47,8 +47,6 @@
 #undef CONFIG_SKIP_RELOCATE_UBOOT
-#define CONFIG_L2_OFF
 /* input clock of PLL: SMDKC100 has 12MHz input clock */
 #define CONFIG_SYS_CLK_FREQ	12000000