Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
FengXiao2002
Linux Imx
提交
2ef7f3db
L
Linux Imx
项目概览
FengXiao2002
/
Linux Imx
通知
8
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
L
Linux Imx
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
2ef7f3db
编写于
11月 05, 2009
作者:
R
Russell King
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
ARM: Fix ptrace accesses
Signed-off-by:
N
Russell King
<
rmk+kernel@arm.linux.org.uk
>
上级
bf32eb85
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
50 additions
and
30 deletions
+50
-30
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/cacheflush.h
+2
-22
arch/arm/include/asm/smp_plat.h
arch/arm/include/asm/smp_plat.h
+5
-0
arch/arm/mm/flush.c
arch/arm/mm/flush.c
+43
-8
未找到文件。
arch/arm/include/asm/cacheflush.h
浏览文件 @
2ef7f3db
...
...
@@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
* processes address space. Really, we want to allow our "user
* space" model to handle this.
*/
/*
 * Copy user data to a page mapped into another process's address space
 * (the ptrace write path — see the commit context above).  Out-of-line so
 * the implementation can perform the cache maintenance needed to keep the
 * I/D caches coherent with the write.
 *
 * NOTE(review): the page-scraped text carried both the old function-like
 * macro and this extern declaration; keeping the macro would make the
 * preprocessor expand (and break) the prototype below, so only the
 * declaration is retained.  Parameter names added per prototype convention.
 */
extern void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			      unsigned long vaddr, void *dst, const void *src,
			      unsigned long len);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
...
...
@@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
}
}
/*
 * VIVT variant of flush_ptrace_access(): make the kernel-side copy at
 * @kaddr (length @len) coherent between the D-cache and I-cache, but only
 * when the target mm is live on this CPU — on VIVT there is nothing of the
 * remote process in our cache otherwise.
 *
 * @uaddr, @page and @write are accepted to match the generic signature;
 * they are not used here.
 */
static inline void
vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	unsigned long start = (unsigned long)kaddr;

	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		return;

	__cpuc_coherent_kern_range(start, start + len);
}
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
vivt_flush_cache_mm(mm)
...
...
@@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
vivt_flush_cache_range(vma,start,end)
/* VIVT-only build (CONFIG_CPU_CACHE_VIPT unset): route the generic cache
 * API straight to the inline VIVT implementations above. */
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#define flush_ptrace_access(vma,page,ua,ka,len,write) \
		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
/* VIPT-capable build: out-of-line implementations (see arch/arm/mm/flush.c
 * elsewhere in this commit). */
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma,
		unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
		unsigned long uaddr, void *kaddr,
		unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
...
...
arch/arm/include/asm/smp_plat.h
浏览文件 @
2ef7f3db
...
...
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
return
((
read_cpuid_ext
(
CPUID_EXT_MMFR3
)
>>
12
)
&
0xf
)
<
2
;
}
/*
 * True when cache maintenance operations are NOT broadcast by the
 * hardware, so software must notify the other CPUs itself (mirrors
 * tlb_ops_need_broadcast() above).
 */
static inline int cache_ops_need_broadcast(void)
{
	/* Bits [15:12] of ID_MMFR3 — presumably the maintenance-broadcast
	 * field; TODO confirm the exact field semantics against the ARM ARM. */
	unsigned int field = (read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf;

	return field < 1;
}
#endif
arch/arm/mm/flush.c
浏览文件 @
2ef7f3db
...
...
@@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
...
...
@@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
if
(
vma
->
vm_flags
&
VM_EXEC
&&
icache_is_vivt_asid_tagged
())
__flush_icache_all
();
}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif
#ifdef CONFIG_SMP
static
void
flush_ptrace_access_other
(
void
*
args
)
{
__flush_icache_all
();
}
#endif
static
void
flush_ptrace_access
(
struct
vm_area_struct
*
vma
,
struct
page
*
page
,
unsigned
long
uaddr
,
void
*
kaddr
,
unsigned
long
len
,
int
write
)
unsigned
long
uaddr
,
void
*
kaddr
,
unsigned
long
len
)
{
if
(
cache_is_vivt
())
{
vivt_flush_ptrace_access
(
vma
,
page
,
uaddr
,
kaddr
,
len
,
write
);
if
(
cpumask_test_cpu
(
smp_processor_id
(),
mm_cpumask
(
vma
->
vm_mm
)))
{
unsigned
long
addr
=
(
unsigned
long
)
kaddr
;
__cpuc_coherent_kern_range
(
addr
,
addr
+
len
);
}
return
;
}
...
...
@@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
}
/* VIPT non-aliasing cache */
if
(
cpumask_test_cpu
(
smp_processor_id
(),
mm_cpumask
(
vma
->
vm_mm
))
&&
vma
->
vm_flags
&
VM_EXEC
)
{
if
(
vma
->
vm_flags
&
VM_EXEC
)
{
unsigned
long
addr
=
(
unsigned
long
)
kaddr
;
/* only flushing the kernel mapping on non-aliasing VIPT */
__cpuc_coherent_kern_range
(
addr
,
addr
+
len
);
#ifdef CONFIG_SMP
if
(
cache_ops_need_broadcast
())
smp_call_function
(
flush_ptrace_access_other
,
NULL
,
1
);
#endif
}
}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
* space" model to handle this.
*
* Note that this code needs to run on the current CPU.
*/
void
copy_to_user_page
(
struct
vm_area_struct
*
vma
,
struct
page
*
page
,
unsigned
long
uaddr
,
void
*
dst
,
const
void
*
src
,
unsigned
long
len
)
{
#ifdef CONFIG_SMP
preempt_disable
();
#endif
memcpy
(
dst
,
src
,
len
);
flush_ptrace_access
(
vma
,
page
,
uaddr
,
dst
,
len
);
#ifdef CONFIG_SMP
preempt_enable
();
#endif
}
void
__flush_dcache_page
(
struct
address_space
*
mapping
,
struct
page
*
page
)
{
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录