6665398a
编写于
12月 17, 2009
作者:
R
Russell King
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'cache' (early part)
上级
c0caac93
bf32eb85
变更
29
隐藏空白更改
内联
并排
Showing
29 changed file
with
224 addition
and
155 deletion
+224
-155
 arch/arm/common/dmabounce.c        +4   -8
 arch/arm/include/asm/cacheflush.h  +5   -12
 arch/arm/mm/cache-fa.S             +6   -5
 arch/arm/mm/cache-l2x0.c           +72  -21
 arch/arm/mm/cache-v3.S             +5   -4
 arch/arm/mm/cache-v4.S             +5   -4
 arch/arm/mm/cache-v4wb.S           +6   -5
 arch/arm/mm/cache-v4wt.S           +6   -5
 arch/arm/mm/cache-v6.S             +6   -5
 arch/arm/mm/cache-v7.S             +7   -6
 arch/arm/mm/flush.c                +2   -2
 arch/arm/mm/highmem.c              +1   -1
 arch/arm/mm/nommu.c                +1   -1
 arch/arm/mm/proc-arm1020.S         +6   -5
 arch/arm/mm/proc-arm1020e.S        +6   -5
 arch/arm/mm/proc-arm1022.S         +6   -5
 arch/arm/mm/proc-arm1026.S         +6   -5
 arch/arm/mm/proc-arm920.S          +6   -5
 arch/arm/mm/proc-arm922.S          +6   -5
 arch/arm/mm/proc-arm925.S          +6   -5
 arch/arm/mm/proc-arm926.S          +6   -5
 arch/arm/mm/proc-arm940.S          +5   -4
 arch/arm/mm/proc-arm946.S          +6   -5
 arch/arm/mm/proc-feroceon.S        +8   -7
 arch/arm/mm/proc-mohawk.S          +6   -5
 arch/arm/mm/proc-syms.c            +1   -2
 arch/arm/mm/proc-xsc3.S            +6   -5
 arch/arm/mm/proc-xscale.S          +7   -6
 drivers/mtd/maps/pxa2xx-flash.c    +11  -2
arch/arm/common/dmabounce.c
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 		memcpy(ptr, buf->safe, size);
 
 		/*
-		 * DMA buffers must have the same cache properties
-		 * as if they were really used for DMA - which means
-		 * data must be written back to RAM.  Note that
-		 * we don't use dmac_flush_range() here for the
-		 * bidirectional case because we know the cache
-		 * lines will be coherent with the data written.
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
+		 * with user mappings.
 		 */
-		dmac_clean_range(ptr, ptr + size);
-		outer_clean_range(__pa(ptr), __pa(ptr) + size);
+		__cpuc_flush_kernel_dcache_area(ptr, size);
 	}
 	free_safe_buffer(dev->archdata.dmabounce, buf);
 }
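Note on the dmabounce hunk: the old pair of calls only performed a clean (write-back) of the kernel mapping, first at L1 and then at the outer (L2) cache, while the replacement call cleans and invalidates the lines in one operation, which is what a page cache page needs before it is next read through a user mapping. A minimal side-by-side sketch, using only the calls from the hunk above ("clean" and "flush" here carry their standard ARM cache-maintenance meanings):

	/* old: write dirty lines back to RAM, L1 then outer (L2) cache */
	dmac_clean_range(ptr, ptr + size);
	outer_clean_range(__pa(ptr), __pa(ptr) + size);

	/* new: clean + invalidate the kernel alias in one call, so a later
	 * access through a user mapping of the same page cannot hit stale
	 * lines left behind in the kernel mapping */
	__cpuc_flush_kernel_dcache_area(ptr, size);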
arch/arm/include/asm/cacheflush.h
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);
@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API.  Do not use directly.
@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
  */
 #define flush_icache_page(vma,page)	do { } while (0)
 
-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
-	unsigned offset, size_t size)
-{
-	const void *start = (void __force *)virt + offset;
-	dmac_inv_range(start, start + size);
-}
-
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
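The header change above is the core of the series: flush_kern_dcache_page(void *) becomes flush_kern_dcache_area(void *, size_t), so the extent of the flush is an explicit argument instead of an implied, page-aligned PAGE_SIZE. A sketch of how a call site migrates (the call is taken verbatim from the flush_kernel_dcache_page() hunk above):

	/* before: the argument had to be page aligned; exactly one page
	 * was flushed */
	__cpuc_flush_dcache_page(page_address(page));

	/* after: the caller names the region, so sub-page or multi-page
	 * extents can use the same primitive */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);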
arch/arm/mm/cache-fa.S
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
  */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
+	.long	fa_flush_kern_dcache_area
 	.long	fa_dma_inv_range
 	.long	fa_dma_clean_range
 	.long	fa_dma_flush_range
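Every per-CPU assembly implementation in the rest of this commit follows the same mechanical rewrite shown here for cache-fa.S: the loop's end pointer is computed as r0 + r1 (addr + size, the two procedure-call arguments) instead of r0 + #PAGE_SZ. In rough C terms (a sketch only; clean_and_invalidate_dcache_line is a hypothetical stand-in for the mcr instruction in the listing):

	/* before: void fa_flush_kern_dcache_page(void *addr) */
	void fa_flush_kern_dcache_area(void *addr, size_t size)
	{
		char *p = addr;
		char *end = p + size;		/* was: addr + PAGE_SZ */

		do {
			/* mcr p15, 0, r0, c7, c14, 1 */
			clean_and_invalidate_dcache_line(p);
			p += CACHE_DLINESIZE;
		} while (p < end);
	}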
arch/arm/mm/cache-l2x0.c
@@ -28,69 +28,120 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 
-static inline void sync_writel(unsigned long val, unsigned long reg,
-			       unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
-	while (readl(l2x0_base + reg) & complete_mask)
+	while (readl(reg) & mask)
 		;
-	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
 {
-	sync_writel(0, L2X0_CACHE_SYNC, 1);
+	void __iomem *base = l2x0_base;
+	writel(0, base + L2X0_CACHE_SYNC);
+	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
 static inline void l2x0_inv_all(void)
 {
+	unsigned long flags;
+
 	/* invalidate all ways */
-	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0xff, l2x0_base + L2X0_INV_WAY);
+	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
 	}
 
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_INV_LINE_PA, 1);
+			writel(start, base + L2X0_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
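Besides splitting sync_writel() into writel() plus cache_wait(), the three range operations above gain a latency bound: the spinlock is now taken once per call, but it is dropped and immediately re-taken after every 4096 bytes of cache lines, giving pending interrupts a window to run during a long clean or invalidate. Distilled into one hypothetical helper (a sketch of the pattern, not kernel code; each of the three functions instantiates it with a different operation register):

	static void l2x0_op_range(void __iomem *op_reg, unsigned long start,
				  unsigned long end)
	{
		unsigned long flags;

		spin_lock_irqsave(&l2x0_lock, flags);
		while (start < end) {
			/* at most one page worth of lines per lock hold */
			unsigned long blk_end = start + min(end - start, 4096UL);

			while (start < blk_end) {
				cache_wait(op_reg, 1);	/* previous line done? */
				writel(start, op_reg);	/* queue this line */
				start += CACHE_LINE_SIZE;
			}
			if (blk_end < end) {
				/* brief window for interrupts between blocks */
				spin_unlock_irqrestore(&l2x0_lock, flags);
				spin_lock_irqsave(&l2x0_lock, flags);
			}
		}
		cache_wait(op_reg, 1);
		cache_sync();
		spin_unlock_irqrestore(&l2x0_lock, flags);
	}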
arch/arm/mm/cache-v3.S
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *page, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_flush_user_cache_range
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
-	.long	v3_flush_kern_dcache_page
+	.long	v3_flush_kern_dcache_area
 	.long	v3_dma_inv_range
 	.long	v3_dma_clean_range
 	.long	v3_dma_flush_range
arch/arm/mm/cache-v4.S
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
 	.long	v4_flush_user_cache_range
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
-	.long	v4_flush_kern_dcache_page
+	.long	v4_flush_kern_dcache_area
 	.long	v4_dma_inv_range
 	.long	v4_dma_clean_range
 	.long	v4_dma_flush_range
arch/arm/mm/cache-v4wb.S
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wb_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+	add	r1, r0, r1
 	/* fall through */
 
 /*
@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_flush_user_cache_range
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
-	.long	v4wb_flush_kern_dcache_page
+	.long	v4wb_flush_kern_dcache_area
 	.long	v4wb_dma_inv_range
 	.long	v4wb_dma_clean_range
 	.long	v4wb_dma_flush_range
arch/arm/mm/cache-v4wt.S
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 	/* fallthrough */
 
 /*
@@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns)
 	.long	v4wt_flush_user_cache_range
 	.long	v4wt_coherent_kern_range
 	.long	v4wt_coherent_user_range
-	.long	v4wt_flush_kern_dcache_page
+	.long	v4wt_flush_kern_dcache_area
 	.long	v4wt_dma_inv_range
 	.long	v4wt_dma_clean_range
 	.long	v4wt_dma_flush_range
arch/arm/mm/cache-v6.S
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
 
 /*
- *	v6_flush_kern_dcache_page(kaddr)
+ *	v6_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v6_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
@@ -271,7 +272,7 @@ ENTRY(v6_cache_fns)
 	.long	v6_flush_user_cache_range
 	.long	v6_coherent_kern_range
 	.long	v6_coherent_user_range
-	.long	v6_flush_kern_dcache_page
+	.long	v6_flush_kern_dcache_area
 	.long	v6_dma_inv_range
 	.long	v6_dma_clean_range
 	.long	v6_dma_flush_range
arch/arm/mm/cache-v7.S
@@ -186,16 +186,17 @@ ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
 
 /*
- *	v7_flush_kern_dcache_page(kaddr)
+ *	v7_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v7_flush_kern_dcache_page)
+ENTRY(v7_flush_kern_dcache_area)
 	dcache_line_size r2, r3
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2
@@ -203,7 +204,7 @@ ENTRY(v7_flush_kern_dcache_page)
 	blo	1b
 	dsb
 	mov	pc, lr
-ENDPROC(v7_flush_kern_dcache_page)
+ENDPROC(v7_flush_kern_dcache_area)
 
 /*
  *	v7_dma_inv_range(start,end)
@@ -279,7 +280,7 @@ ENTRY(v7_cache_fns)
 	.long	v7_flush_user_cache_range
 	.long	v7_coherent_kern_range
 	.long	v7_coherent_user_range
-	.long	v7_flush_kern_dcache_page
+	.long	v7_flush_kern_dcache_area
 	.long	v7_dma_inv_range
 	.long	v7_dma_clean_range
 	.long	v7_dma_flush_range
arch/arm/mm/flush.c
@@ -131,7 +131,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 */
 	if (addr)
 #endif
-		__cpuc_flush_dcache_page(addr);
+		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -258,5 +258,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 * in this mapping of the page.  FIXME: this is overkill
 	 * since we actually ask for a write-back and invalidate.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_page((void *)vaddr);
+		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
arch/arm/mm/nommu.c
@@ -61,7 +61,7 @@ void setup_mm_for_reboot(char mode)
 
 void flush_dcache_page(struct page *page)
 {
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 EXPORT_SYMBOL(flush_dcache_page);
arch/arm/mm/proc-arm1020.S
@@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1020_flush_kern_dcache_page)
+ENTRY(arm1020_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	add	r0, r0, #CACHE_DLINESIZE
@@ -335,7 +336,7 @@ ENTRY(arm1020_cache_fns)
 	.long	arm1020_flush_user_cache_range
 	.long	arm1020_coherent_kern_range
 	.long	arm1020_coherent_user_range
-	.long	arm1020_flush_kern_dcache_page
+	.long	arm1020_flush_kern_dcache_area
 	.long	arm1020_dma_inv_range
 	.long	arm1020_dma_clean_range
 	.long	arm1020_dma_flush_range
arch/arm/mm/proc-arm1020e.S
@@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1020e_flush_kern_dcache_page)
+ENTRY(arm1020e_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -321,7 +322,7 @@ ENTRY(arm1020e_cache_fns)
 	.long	arm1020e_flush_user_cache_range
 	.long	arm1020e_coherent_kern_range
 	.long	arm1020e_coherent_user_range
-	.long	arm1020e_flush_kern_dcache_page
+	.long	arm1020e_flush_kern_dcache_area
 	.long	arm1020e_dma_inv_range
 	.long	arm1020e_dma_clean_range
 	.long	arm1020e_dma_flush_range
arch/arm/mm/proc-arm1022.S
@@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1022_flush_kern_dcache_page)
+ENTRY(arm1022_flush_kern_dcache_area)
 	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -310,7 +311,7 @@ ENTRY(arm1022_cache_fns)
 	.long	arm1022_flush_user_cache_range
 	.long	arm1022_coherent_kern_range
 	.long	arm1022_coherent_user_range
-	.long	arm1022_flush_kern_dcache_page
+	.long	arm1022_flush_kern_dcache_area
 	.long	arm1022_dma_inv_range
 	.long	arm1022_dma_clean_range
 	.long	arm1022_dma_flush_range
arch/arm/mm/proc-arm1026.S
@@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1026_flush_kern_dcache_page)
+ENTRY(arm1026_flush_kern_dcache_area)
 	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -304,7 +305,7 @@ ENTRY(arm1026_cache_fns)
 	.long	arm1026_flush_user_cache_range
 	.long	arm1026_coherent_kern_range
 	.long	arm1026_coherent_user_range
-	.long	arm1026_flush_kern_dcache_page
+	.long	arm1026_flush_kern_dcache_area
 	.long	arm1026_dma_inv_range
 	.long	arm1026_dma_clean_range
 	.long	arm1026_dma_flush_range
arch/arm/mm/proc-arm920.S
@@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm920_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm920_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -293,7 +294,7 @@ ENTRY(arm920_cache_fns)
 	.long	arm920_flush_user_cache_range
 	.long	arm920_coherent_kern_range
 	.long	arm920_coherent_user_range
-	.long	arm920_flush_kern_dcache_page
+	.long	arm920_flush_kern_dcache_area
 	.long	arm920_dma_inv_range
 	.long	arm920_dma_clean_range
 	.long	arm920_dma_flush_range
arch/arm/mm/proc-arm922.S
@@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm922_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm922_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -295,7 +296,7 @@ ENTRY(arm922_cache_fns)
 	.long	arm922_flush_user_cache_range
 	.long	arm922_coherent_kern_range
 	.long	arm922_coherent_user_range
-	.long	arm922_flush_kern_dcache_page
+	.long	arm922_flush_kern_dcache_area
 	.long	arm922_dma_inv_range
 	.long	arm922_dma_clean_range
 	.long	arm922_dma_flush_range
arch/arm/mm/proc-arm925.S
@@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm925_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm925_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -346,7 +347,7 @@ ENTRY(arm925_cache_fns)
 	.long	arm925_flush_user_cache_range
 	.long	arm925_coherent_kern_range
 	.long	arm925_coherent_user_range
-	.long	arm925_flush_kern_dcache_page
+	.long	arm925_flush_kern_dcache_area
 	.long	arm925_dma_inv_range
 	.long	arm925_dma_clean_range
 	.long	arm925_dma_flush_range
arch/arm/mm/proc-arm926.S
@@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm926_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm926_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -309,7 +310,7 @@ ENTRY(arm926_cache_fns)
 	.long	arm926_flush_user_cache_range
 	.long	arm926_coherent_kern_range
 	.long	arm926_coherent_user_range
-	.long	arm926_flush_kern_dcache_page
+	.long	arm926_flush_kern_dcache_area
 	.long	arm926_dma_inv_range
 	.long	arm926_dma_clean_range
 	.long	arm926_dma_flush_range
arch/arm/mm/proc-arm940.S
@@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range)
 	/* FALLTHROUGH */
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm940_flush_kern_dcache_page)
+ENTRY(arm940_flush_kern_dcache_area)
 	mov	ip, #0
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -238,7 +239,7 @@ ENTRY(arm940_cache_fns)
 	.long	arm940_flush_user_cache_range
 	.long	arm940_coherent_kern_range
 	.long	arm940_coherent_user_range
-	.long	arm940_flush_kern_dcache_page
+	.long	arm940_flush_kern_dcache_area
 	.long	arm940_dma_inv_range
 	.long	arm940_dma_clean_range
 	.long	arm940_dma_flush_range
arch/arm/mm/proc-arm946.S
@@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  * (same as arm926)
  */
-ENTRY(arm946_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm946_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -280,7 +281,7 @@ ENTRY(arm946_cache_fns)
 	.long	arm946_flush_user_cache_range
 	.long	arm946_coherent_kern_range
 	.long	arm946_coherent_user_range
-	.long	arm946_flush_kern_dcache_page
+	.long	arm946_flush_kern_dcache_area
 	.long	arm946_dma_inv_range
 	.long	arm946_dma_clean_range
 	.long	arm946_dma_flush_range
arch/arm/mm/proc-feroceon.S
@@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
 	.align	5
-ENTRY(feroceon_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(feroceon_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page)
 	mov	pc, lr
 
 	.align	5
-ENTRY(feroceon_range_flush_kern_dcache_page)
+ENTRY(feroceon_range_flush_kern_dcache_area)
 	mrs	r2, cpsr
 	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
 	orr	r3, r2, #PSR_I_BIT
@@ -372,7 +373,7 @@ ENTRY(feroceon_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_flush_kern_dcache_page
+	.long	feroceon_flush_kern_dcache_area
 	.long	feroceon_dma_inv_range
 	.long	feroceon_dma_clean_range
 	.long	feroceon_dma_flush_range
@@ -383,7 +384,7 @@ ENTRY(feroceon_range_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_range_flush_kern_dcache_page
+	.long	feroceon_range_flush_kern_dcache_area
 	.long	feroceon_range_dma_inv_range
 	.long	feroceon_range_dma_clean_range
 	.long	feroceon_range_dma_flush_range
arch/arm/mm/proc-mohawk.S
@@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(mohawk_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(mohawk_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -273,7 +274,7 @@ ENTRY(mohawk_cache_fns)
 	.long	mohawk_flush_user_cache_range
 	.long	mohawk_coherent_kern_range
 	.long	mohawk_coherent_user_range
-	.long	mohawk_flush_kern_dcache_page
+	.long	mohawk_flush_kern_dcache_area
 	.long	mohawk_dma_inv_range
 	.long	mohawk_dma_clean_range
 	.long	mohawk_dma_flush_range
arch/arm/mm/proc-syms.c
@@ -27,8 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
-EXPORT_SYMBOL(dmac_inv_range);	/* because of flush_ioremap_region() */
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
arch/arm/mm/proc-xsc3.S
@@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache.
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(xsc3_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xsc3_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1
@@ -309,7 +310,7 @@ ENTRY(xsc3_cache_fns)
 	.long	xsc3_flush_user_cache_range
 	.long	xsc3_coherent_kern_range
 	.long	xsc3_coherent_user_range
-	.long	xsc3_flush_kern_dcache_page
+	.long	xsc3_flush_kern_dcache_area
 	.long	xsc3_dma_inv_range
 	.long	xsc3_dma_clean_range
 	.long	xsc3_dma_flush_range
arch/arm/mm/proc-xscale.S
@@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(xscale_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xscale_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
 	add	r0, r0, #CACHELINESIZE
@@ -368,7 +369,7 @@ ENTRY(xscale_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_inv_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
@@ -392,7 +393,7 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_flush_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
drivers/mtd/maps/pxa2xx-flash.c
@@ -20,14 +20,23 @@
 #include <asm/io.h>
 #include <mach/hardware.h>
-#include <asm/cacheflush.h>
 
 #include <asm/mach/flash.h>
 
+#define CACHELINESIZE	32
+
 static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
 				      ssize_t len)
 {
-	flush_ioremap_region(map->phys, map->cached, from, len);
+	unsigned long start = (unsigned long)map->cached + from;
+	unsigned long end = start + len;
+
+	start &= ~(CACHELINESIZE - 1);
+	while (start < end) {
+		/* invalidate D cache line */
+		asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
+		start += CACHELINESIZE;
+	}
 }
 
 struct pxa2xx_flash_info {
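With flush_ioremap_region() removed from cacheflush.h, the driver open-codes the invalidation: round the start address down to a cache-line boundary, then issue one "mcr p15, 0, <addr>, c7, c6, 1" (invalidate D line) per 32-byte line until the end of the region. The rounding matters because a partially covered first line must still be invalidated; a small self-contained check of the mask arithmetic (assuming the driver's 32-byte CACHELINESIZE):

	#include <assert.h>
	#include <stdint.h>

	#define CACHELINESIZE 32

	int main(void)
	{
		uintptr_t start = 0x1005;	/* unaligned address inside a line */

		start &= ~(uintptr_t)(CACHELINESIZE - 1);
		assert(start == 0x1000);	/* the line containing 0x1005 */
		return 0;
	}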