openeuler / Kernel
Commit 94ecd224
Authored Aug 16, 2009 by Paul Mundt
sh: Fix up the SH-5 build with caches enabled.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent: 1ee4ab09

Showing 6 changed files with 64 additions and 303 deletions (+64, -303):

arch/sh/include/asm/system.h      +1   -13
arch/sh/include/asm/system_32.h   +10  -0
arch/sh/include/asm/system_64.h   +5   -0
arch/sh/kernel/sh_ksyms_64.c      +0   -8
arch/sh/mm/cache-sh5.c            +21  -228
arch/sh/mm/flush-sh4.c            +27  -54
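In outline, the SH-5 flush routines in cache-sh5.c are made static, renamed with an sh5_ prefix, and registered through a new sh5_cache_init() that assigns them to the generic flush_* hooks, while flush-sh4.c switches from open-coded "ocbwb @%0" style inline assembly to the __ocbwb()/__ocbp()/__ocbi() macros added in system_32.h and system_64.h. A minimal userspace sketch of that registration pattern follows; the hook variables, stub names, and printf output are invented for illustration and are not kernel API.

/* Hypothetical sketch: boot-time registration of per-CPU cache hooks,
 * mirroring the way sh5_cache_init() fills in flush_cache_all & friends. */
#include <stdio.h>

/* Generic hooks, normally provided by core code and left unset until init. */
static void (*flush_cache_all_hook)(void);
static void (*flush_icache_range_hook)(unsigned long start, unsigned long end);

/* CPU-specific implementations (stand-ins for sh5_flush_cache_all etc.). */
static void sh5_flush_cache_all_stub(void)
{
	puts("sh5: purge D-cache, invalidate I-cache");
}

static void sh5_flush_icache_range_stub(unsigned long start, unsigned long end)
{
	printf("sh5: write back + invalidate I-cache for %#lx-%#lx\n", start, end);
}

/* Analogue of sh5_cache_init(): run once, point the hooks at this CPU's ops. */
static void sh5_cache_init_stub(void)
{
	flush_cache_all_hook    = sh5_flush_cache_all_stub;
	flush_icache_range_hook = sh5_flush_icache_range_stub;
}

int main(void)
{
	sh5_cache_init_stub();
	flush_cache_all_hook();                  /* callers only see the hooks */
	flush_icache_range_hook(0x1000, 0x2000);
	return 0;
}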
arch/sh/include/asm/system.h

@@ -14,18 +14,6 @@
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
-#define __icbi()				\
-{						\
-	unsigned long __addr;			\
-	__addr = 0xa8000000;			\
-	__asm__ __volatile__(			\
-		"icbi %0\n\t"			\
-		: /* no output */		\
-		: "m" (__m(__addr)));		\
-}
-#endif
-
 /*
  * A brief note on ctrl_barrier(), the control register write barrier.
  *

@@ -44,7 +32,7 @@
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi()
+#define ctrl_barrier()	__icbi(0xa8000000)
 #define read_barrier_depends()	do { } while(0)
 #else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
arch/sh/include/asm/system_32.h

@@ -63,6 +63,16 @@ do { \
 #define __restore_dsp(tsk)	do { } while (0)
 #endif
 
+#if defined(CONFIG_CPU_SH4A)
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
+#else
+#define __icbi(addr)	mb()
+#endif
+
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
+
 struct task_struct *__switch_to(struct task_struct *prev,
 				struct task_struct *next);
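These macros operate on a single cache line by address; the SH-4 flush helpers later in this diff simply step through a region one L1_CACHE_BYTES line at a time and invoke the appropriate macro. Below is a rough, non-SH sketch of that usage (without the 8-way unrolling the real code keeps); the printf stand-in for __ocbwb() and the function name are illustrative only.

/* Sketch of the per-line cache-op loop used by the flush helpers.
 * On SH, __ocbwb() expands to the "ocbwb" instruction; here it is a stub. */
#include <stdio.h>

#define L1_CACHE_BYTES	32UL
#define __ocbwb(addr)	printf("ocbwb @%#lx\n", (unsigned long)(addr))

static void flush_wback_region_sketch(void *start, int size)
{
	unsigned long v = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
	unsigned long end = ((unsigned long)start + size + L1_CACHE_BYTES - 1)
			    & ~(L1_CACHE_BYTES - 1);

	while (v < end) {
		__ocbwb(v);		/* write back one cache line */
		v += L1_CACHE_BYTES;
	}
}

int main(void)
{
	char buf[200];

	flush_wback_region_sketch(buf, sizeof(buf));
	return 0;
}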
arch/sh/include/asm/system_64.h

@@ -37,6 +37,11 @@ do { \
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
+
 static inline reg_size_t register_align(void *val)
 {
 	return (unsigned long long)(signed long long)(signed long)val;
arch/sh/kernel/sh_ksyms_64.c

@@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(kernel_thread);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
 #ifdef CONFIG_VT
 EXPORT_SYMBOL(screen_info);
 #endif
arch/sh/mm/cache-sh5.c

@@ -25,29 +25,6 @@ extern void __weak sh4__flush_region_init(void);
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
-void __init cpu_cache_init(void)
-{
-	/* Reserve a slot for dcache colouring in the DTLB */
-	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-
-	sh4__flush_region_init();
-}
-
-void __init kmap_coherent_init(void)
-{
-	/* XXX ... */
-}
-
-void *kmap_coherent(struct page *page, unsigned long addr)
-{
-	/* XXX ... */
-	return NULL;
-}
-
-void kunmap_coherent(void)
-{
-}
-
 #ifdef CONFIG_DCACHE_DISABLED
 #define sh64_dcache_purge_all()					do { } while (0)
 #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)

@@ -233,52 +210,6 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 	}
 }
 
-/*
- * Invalidate a small range of user context I-cache, not necessarily page
- * (or even cache-line) aligned.
- *
- * Since this is used inside ptrace, the ASID in the mm context typically
- * won't match current_asid. We'll have to switch ASID to do this. For
- * safety, and given that the range will be small, do all this under cli.
- *
- * Note, there is a hazard that the ASID in mm->context is no longer
- * actually associated with mm, i.e. if the mm->context has started a new
- * cycle since mm was last active. However, this is just a performance
- * issue: all that happens is that we invalidate lines belonging to
- * another mm, so the owning process has to refill them when that mm goes
- * live again. mm itself can't have any cache entries because there will
- * have been a flush_cache_all when the new mm->context cycle started.
- */
-static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
-					     unsigned long start, int len)
-{
-	unsigned long long eaddr = start;
-	unsigned long long eaddr_end = start + len;
-	unsigned long current_asid, mm_asid;
-	unsigned long flags;
-	unsigned long long epage_start;
-
-	/*
-	 * Align to start of cache line. Otherwise, suppose len==8 and
-	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
-	 */
-	eaddr = L1_CACHE_ALIGN(start);
-	eaddr_end = start + len;
-
-	mm_asid = cpu_asid(smp_processor_id(), mm);
-	local_irq_save(flags);
-	current_asid = switch_and_save_asid(mm_asid);
-
-	epage_start = eaddr & PAGE_MASK;
-
-	while (eaddr < eaddr_end) {
-		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
-		eaddr += L1_CACHE_BYTES;
-	}
-	switch_and_save_asid(current_asid);
-	local_irq_restore(flags);
-}
-
 static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
 {
 	/* The icbi instruction never raises ITLBMISS. i.e. if there's not a

@@ -564,7 +495,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
  * Invalidate the entire contents of both caches, after writing back to
  * memory any dirty data from the D-cache.
  */
-void flush_cache_all(void)
+static void sh5_flush_cache_all(void)
 {
 	sh64_dcache_purge_all();
 	sh64_icache_inv_all();

@@ -591,7 +522,7 @@ void flush_cache_all(void)
  * I-cache. This is similar to the lack of action needed in
  * flush_tlb_mm - see fault.c.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh5_flush_cache_mm(struct mm_struct *mm)
 {
 	sh64_dcache_purge_all();
 }

@@ -603,8 +534,8 @@ void flush_cache_mm(struct mm_struct *mm)
  *
  * Note, 'end' is 1 byte beyond the end of the range to flush.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh5_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 

@@ -621,8 +552,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * Note, this is called with pte lock held.
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
-		      unsigned long pfn)
+static void sh5_flush_cache_page(struct vm_area_struct *vma,
+				 unsigned long eaddr, unsigned long pfn)
 {
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 

@@ -630,7 +561,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
 		sh64_icache_inv_user_page(vma, eaddr);
 }
 
-void flush_dcache_page(struct page *page)
+static void sh5_flush_dcache_page(struct page *page)
 {
 	sh64_dcache_purge_phy_page(page_to_phys(page));
 	wmb();

@@ -644,39 +575,20 @@ void flush_dcache_page(struct page *page)
  * mapping, therefore it's guaranteed that there no cache entries for
  * the range in cache sets of the wrong colour.
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh5_flush_icache_range(unsigned long start, unsigned long end)
 {
 	__flush_purge_region((void *)start, end);
 	wmb();
 	sh64_icache_inv_kernel_range(start, end);
 }
 
-/*
- * Flush the range of user (defined by vma->vm_mm) address space starting
- * at 'addr' for 'len' bytes from the cache. The range does not straddle
- * a page boundary, the unique physical page containing the range is
- * 'page'. This seems to be used mainly for invalidating an address
- * range following a poke into the program text through the ptrace() call
- * from another process (e.g. for BRK instruction insertion).
- */
-static void flush_icache_user_range(struct vm_area_struct *vma,
-			struct page *page, unsigned long addr, int len)
-{
-	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
-	mb();
-
-	if (vma->vm_flags & VM_EXEC)
-		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
-}
-
 /*
  * For the address range [start,end), write back the data from the
  * D-cache and invalidate the corresponding region of the I-cache for the
  * current process. Used to flush signal trampolines on the stack to
  * make them executable.
  */
-void flush_cache_sigtramp(unsigned long vaddr)
+static void sh5_flush_cache_sigtramp(unsigned long vaddr)
 {
 	unsigned long end = vaddr + L1_CACHE_BYTES;
 

@@ -685,138 +597,19 @@ void flush_cache_sigtramp(unsigned long vaddr)
 	sh64_icache_inv_current_user_range(vaddr, end);
 }
 
-#ifdef CONFIG_MMU
-/*
- * These *MUST* lie in an area of virtual address space that's otherwise
- * unused.
- */
-#define UNIQUE_EADDR_START 0xe0000000UL
-#define UNIQUE_EADDR_END   0xe8000000UL
-
-/*
- * Given a physical address paddr, and a user virtual address user_eaddr
- * which will eventually be mapped to it, create a one-off kernel-private
- * eaddr mapped to the same paddr. This is used for creating special
- * destination pages for copy_user_page and clear_user_page.
- */
-static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
-					    unsigned long paddr)
-{
-	static unsigned long current_pointer = UNIQUE_EADDR_START;
-	unsigned long coloured_pointer;
-
-	if (current_pointer == UNIQUE_EADDR_END) {
-		sh64_dcache_purge_all();
-		current_pointer = UNIQUE_EADDR_START;
-	}
-
-	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
-				(user_eaddr & CACHE_OC_SYN_MASK);
-	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-
-	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-
-	return coloured_pointer;
-}
-
-static void sh64_copy_user_page_coloured(void *to, void *from,
-					 unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing cache entries of the wrong colour. These are
-	 * present quite often, if the kernel has recently used the page
-	 * internally, then given it up, then it's been allocated to the user.
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	copy_page(from, coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-static void sh64_clear_user_page_coloured(void *to, unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing kernel-originated lines of the wrong
-	 * colour (as above)
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	clear_page(coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * 'from' and 'to' are kernel virtual addresses (within the superpage
- * mapping of the physical RAM). 'address' is the user virtual address
- * where the copy 'to' will be mapped after. This allows a custom
- * mapping to be used to ensure that the new copy is placed in the
- * right cache sets for the user to see it without having to bounce it
- * out via memory. Note however : the call to flush_page_to_ram in
- * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
- * very important case!
- *
- * TBD : can we guarantee that on every call, any cache entries for
- * 'from' are in the same colour sets as 'address' also? i.e. is this
- * always used just to deal with COW? (I suspect not).
- *
- * There are two possibilities here for when the page 'from' was last accessed:
- * - by the kernel : this is OK, no purge required.
- * - by the/a user (e.g. for break_COW) : need to purge.
- *
- * If the potential user mapping at 'address' is the same colour as
- * 'from' there is no need to purge any cache lines from the 'from'
- * page mapped into cache sets of colour 'address'. (The copy will be
- * accessing the page through 'from').
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
-{
-	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
-		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
-
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		copy_page(to, from);
-	else
-		sh64_copy_user_page_coloured(to, from, address);
-}
-
-/*
- * 'to' is a kernel virtual address (within the superpage mapping of the
- * physical RAM). 'address' is the user virtual address where the 'to'
- * page will be mapped after. This allows a custom mapping to be used to
- * ensure that the new copy is placed in the right cache sets for the
- * user to see it without having to bounce it out via memory.
- */
-void clear_user_page(void *to, unsigned long address, struct page *page)
-{
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		clear_page(to);
-	else
-		sh64_clear_user_page_coloured(to, address);
-}
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-		       unsigned long vaddr, void *dst, const void *src,
-		       unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
-	flush_icache_user_range(vma, page, vaddr, len);
-}
-
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-			 unsigned long vaddr, void *dst, const void *src,
-			 unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
-}
-#endif
+void __init sh5_cache_init(void)
+{
+	flush_cache_all		= sh5_flush_cache_all;
+	flush_cache_mm		= sh5_flush_cache_mm;
+	flush_cache_dup_mm	= sh5_flush_cache_mm;
+	flush_cache_page	= sh5_flush_cache_page;
+	flush_cache_range	= sh5_flush_cache_range;
+	flush_dcache_page	= sh5_flush_dcache_page;
+	flush_icache_range	= sh5_flush_icache_range;
+	flush_cache_sigtramp	= sh5_flush_cache_sigtramp;
+
+	/* Reserve a slot for dcache colouring in the DTLB */
+	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
+
+	sh4__flush_region_init();
+}
arch/sh/mm/flush-sh4.c

@@ -19,28 +19,19 @@ static void sh4__flush_wback_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }

@@ -62,27 +53,18 @@ static void sh4__flush_purge_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }

@@ -101,28 +83,19 @@ static void sh4__flush_invalidate_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }