openeuler / qemu

Commit 61382a50
Authored on Oct 27, 2003 by bellard

full softmmu support

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@410 c046a42c-6fe2-441c-8c8c-71466251a162

Parent: 3a51dee6
Showing 10 changed files with 594 additions and 339 deletions (+594, -339).
cpu-all.h                +86  -47
exec.c                   +81  -22
hw/vga_template.h         +2   -2
softmmu_header.h         +51  -19
softmmu_template.h       +50  -32
target-i386/exec.h       +81  -19
target-i386/helper.c    +111  -87
target-i386/helper2.c    +43  -33
target-i386/op.c          +3   -1
target-i386/translate.c  +86  -77
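Before the per-file diffs, a quick orientation on the accessor naming used throughout this commit may help. This is a reader's note, not part of the patch; the glue()/xglue() token-pasting helpers are assumed to come from QEMU's existing headers.

/* Reader's note (assumed helpers, as used by softmmu_header.h below):
 *   #define xglue(x, y) x ## y
 *   #define glue(x, y)  xglue(x, y)
 *
 * Accessor names decompose as  ld/st + size suffix + address-space suffix:
 *   ldub_raw     host-pointer access, no MMU translation (new _raw family)
 *   ldl_kernel   soft-TLB access using TLB index 0            (ACCESS_TYPE 0)
 *   ldl_user     soft-TLB access using TLB index 1            (ACCESS_TYPE 1)
 *   ldl_data     soft-TLB access, index chosen from current CPL (ACCESS_TYPE 2)
 *   __ldl_mmu    out-of-line slow path generated by softmmu_template.h
 */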
cpu-all.h

@@ -20,18 +20,19 @@
 #ifndef CPU_ALL_H
 #define CPU_ALL_H
 
-/* all CPU memory access use these macros */
-static inline int ldub(void *ptr)
+/* CPU memory access without any memory or io remapping */
+static inline int ldub_raw(void *ptr)
 {
     return *(uint8_t *)ptr;
 }
 
-static inline int ldsb(void *ptr)
+static inline int ldsb_raw(void *ptr)
 {
     return *(int8_t *)ptr;
 }
 
-static inline void stb(void *ptr, int v)
+static inline void stb_raw(void *ptr, int v)
 {
     *(uint8_t *)ptr = v;
 }
@@ -42,7 +43,7 @@ static inline void stb(void *ptr, int v)
 #if defined(WORDS_BIGENDIAN) || defined(__arm__)
 
 /* conservative code for little endian unaligned accesses */
-static inline int lduw(void *ptr)
+static inline int lduw_raw(void *ptr)
 {
 #ifdef __powerpc__
     int val;
@@ -54,7 +55,7 @@ static inline int lduw(void *ptr)
 #endif
 }
 
-static inline int ldsw(void *ptr)
+static inline int ldsw_raw(void *ptr)
 {
 #ifdef __powerpc__
     int val;
@@ -66,7 +67,7 @@ static inline int ldsw(void *ptr)
 #endif
 }
 
-static inline int ldl(void *ptr)
+static inline int ldl_raw(void *ptr)
 {
 #ifdef __powerpc__
     int val;
@@ -78,16 +79,16 @@ static inline int ldl(void *ptr)
 #endif
 }
 
-static inline uint64_t ldq(void *ptr)
+static inline uint64_t ldq_raw(void *ptr)
 {
     uint8_t *p = ptr;
     uint32_t v1, v2;
-    v1 = ldl(p);
-    v2 = ldl(p + 4);
+    v1 = ldl_raw(p);
+    v2 = ldl_raw(p + 4);
     return v1 | ((uint64_t)v2 << 32);
 }
 
-static inline void stw(void *ptr, int v)
+static inline void stw_raw(void *ptr, int v)
 {
 #ifdef __powerpc__
     __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
@@ -98,7 +99,7 @@ static inline void stw(void *ptr, int v)
 #endif
 }
 
-static inline void stl(void *ptr, int v)
+static inline void stl_raw(void *ptr, int v)
 {
 #ifdef __powerpc__
     __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
@@ -111,104 +112,104 @@ static inline void stl(void *ptr, int v)
 #endif
 }
 
-static inline void stq(void *ptr, uint64_t v)
+static inline void stq_raw(void *ptr, uint64_t v)
 {
     uint8_t *p = ptr;
-    stl(p, (uint32_t)v);
-    stl(p + 4, v >> 32);
+    stl_raw(p, (uint32_t)v);
+    stl_raw(p + 4, v >> 32);
 }
 
 /* float access */
 
-static inline float ldfl(void *ptr)
+static inline float ldfl_raw(void *ptr)
 {
     union {
         float f;
         uint32_t i;
     } u;
-    u.i = ldl(ptr);
+    u.i = ldl_raw(ptr);
     return u.f;
 }
 
-static inline void stfl(void *ptr, float v)
+static inline void stfl_raw(void *ptr, float v)
 {
     union {
         float f;
         uint32_t i;
     } u;
     u.f = v;
-    stl(ptr, u.i);
+    stl_raw(ptr, u.i);
 }
 
 #if defined(__arm__) && !defined(WORDS_BIGENDIAN)
 
 /* NOTE: arm is horrible as double 32 bit words are stored in big endian ! */
-static inline double ldfq(void *ptr)
+static inline double ldfq_raw(void *ptr)
 {
     union {
         double d;
         uint32_t tab[2];
     } u;
-    u.tab[1] = ldl(ptr);
-    u.tab[0] = ldl(ptr + 4);
+    u.tab[1] = ldl_raw(ptr);
+    u.tab[0] = ldl_raw(ptr + 4);
     return u.d;
 }
 
-static inline void stfq(void *ptr, double v)
+static inline void stfq_raw(void *ptr, double v)
 {
     union {
         double d;
         uint32_t tab[2];
     } u;
     u.d = v;
-    stl(ptr, u.tab[1]);
-    stl(ptr + 4, u.tab[0]);
+    stl_raw(ptr, u.tab[1]);
+    stl_raw(ptr + 4, u.tab[0]);
 }
 #else
-static inline double ldfq(void *ptr)
+static inline double ldfq_raw(void *ptr)
 {
     union {
         double d;
         uint64_t i;
     } u;
-    u.i = ldq(ptr);
+    u.i = ldq_raw(ptr);
     return u.d;
 }
 
-static inline void stfq(void *ptr, double v)
+static inline void stfq_raw(void *ptr, double v)
 {
     union {
         double d;
         uint64_t i;
    } u;
     u.d = v;
-    stq(ptr, u.i);
+    stq_raw(ptr, u.i);
 }
 #endif
 
 #elif defined(TARGET_WORDS_BIGENDIAN) && !defined(WORDS_BIGENDIAN)
 
-static inline int lduw(void *ptr)
+static inline int lduw_raw(void *ptr)
 {
     uint8_t *b = (uint8_t *)ptr;
     return (b[0] << 8 | b[1]);
 }
 
-static inline int ldsw(void *ptr)
+static inline int ldsw_raw(void *ptr)
 {
     int8_t *b = (int8_t *)ptr;
     return (b[0] << 8 | b[1]);
 }
 
-static inline int ldl(void *ptr)
+static inline int ldl_raw(void *ptr)
 {
     uint8_t *b = (uint8_t *)ptr;
     return (b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3]);
 }
 
-static inline uint64_t ldq(void *ptr)
+static inline uint64_t ldq_raw(void *ptr)
 {
     uint32_t a, b;
     a = ldl(ptr);
@@ -216,14 +217,14 @@ static inline uint64_t ldq(void *ptr)
     return (((uint64_t)a << 32) | b);
 }
 
-static inline void stw(void *ptr, int v)
+static inline void stw_raw(void *ptr, int v)
 {
     uint8_t *d = (uint8_t *)ptr;
     d[0] = v >> 8;
     d[1] = v;
 }
 
-static inline void stl(void *ptr, int v)
+static inline void stl_raw(void *ptr, int v)
 {
     uint8_t *d = (uint8_t *)ptr;
     d[0] = v >> 24;
@@ -232,7 +233,7 @@ static inline void stl(void *ptr, int v)
     d[3] = v;
 }
 
-static inline void stq(void *ptr, uint64_t v)
+static inline void stq_raw(void *ptr, uint64_t v)
 {
     stl(ptr, v);
     stl(ptr + 4, v >> 32);
@@ -240,64 +241,102 @@ static inline void stq(void *ptr, uint64_t v)
 #else
 
-static inline int lduw(void *ptr)
+static inline int lduw_raw(void *ptr)
 {
     return *(uint16_t *)ptr;
 }
 
-static inline int ldsw(void *ptr)
+static inline int ldsw_raw(void *ptr)
 {
     return *(int16_t *)ptr;
 }
 
-static inline int ldl(void *ptr)
+static inline int ldl_raw(void *ptr)
 {
     return *(uint32_t *)ptr;
 }
 
-static inline uint64_t ldq(void *ptr)
+static inline uint64_t ldq_raw(void *ptr)
 {
     return *(uint64_t *)ptr;
 }
 
-static inline void stw(void *ptr, int v)
+static inline void stw_raw(void *ptr, int v)
 {
     *(uint16_t *)ptr = v;
 }
 
-static inline void stl(void *ptr, int v)
+static inline void stl_raw(void *ptr, int v)
 {
     *(uint32_t *)ptr = v;
 }
 
-static inline void stq(void *ptr, uint64_t v)
+static inline void stq_raw(void *ptr, uint64_t v)
 {
     *(uint64_t *)ptr = v;
 }
 
 /* float access */
 
-static inline float ldfl(void *ptr)
+static inline float ldfl_raw(void *ptr)
 {
     return *(float *)ptr;
 }
 
-static inline double ldfq(void *ptr)
+static inline double ldfq_raw(void *ptr)
 {
     return *(double *)ptr;
 }
 
-static inline void stfl(void *ptr, float v)
+static inline void stfl_raw(void *ptr, float v)
 {
     *(float *)ptr = v;
 }
 
-static inline void stfq(void *ptr, double v)
+static inline void stfq_raw(void *ptr, double v)
 {
     *(double *)ptr = v;
 }
 #endif
 
+/* MMU memory access macros */
+
+#if defined(CONFIG_USER_ONLY)
+
+/* if user mode, no other memory access functions */
+#define ldub(p) ldub_raw(p)
+#define ldsb(p) ldsb_raw(p)
+#define lduw(p) lduw_raw(p)
+#define ldsw(p) ldsw_raw(p)
+#define ldl(p) ldl_raw(p)
+#define ldq(p) ldq_raw(p)
+#define ldfl(p) ldfl_raw(p)
+#define ldfq(p) ldfq_raw(p)
+#define stb(p, v) stb_raw(p, v)
+#define stw(p, v) stw_raw(p, v)
+#define stl(p, v) stl_raw(p, v)
+#define stq(p, v) stq_raw(p, v)
+#define stfl(p, v) stfl_raw(p, v)
+#define stfq(p, v) stfq_raw(p, v)
+
+#define ldub_code(p) ldub_raw(p)
+#define ldsb_code(p) ldsb_raw(p)
+#define lduw_code(p) lduw_raw(p)
+#define ldsw_code(p) ldsw_raw(p)
+#define ldl_code(p) ldl_raw(p)
+
+#define ldub_kernel(p) ldub_raw(p)
+#define ldsb_kernel(p) ldsb_raw(p)
+#define lduw_kernel(p) lduw_raw(p)
+#define ldsw_kernel(p) ldsw_raw(p)
+#define ldl_kernel(p) ldl_raw(p)
+#define stb_kernel(p, v) stb_raw(p, v)
+#define stw_kernel(p, v) stw_raw(p, v)
+#define stl_kernel(p, v) stl_raw(p, v)
+#define stq_kernel(p, v) stq_raw(p, v)
+
+#endif /* defined(CONFIG_USER_ONLY) */
+
 /* page related stuff */
 #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
...
exec.c

@@ -444,16 +444,20 @@ static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
         prot = 0;
         for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
             prot |= page_get_flags(addr);
+#if !defined(CONFIG_SOFTMMU)
         mprotect((void *)host_start, host_page_size, 
                  (prot & PAGE_BITS) & ~PAGE_WRITE);
+#endif
+#if !defined(CONFIG_USER_ONLY)
+        /* suppress soft TLB */
+        /* XXX: must flush on all processor with same address space */
+        tlb_flush_page_write(cpu_single_env, host_start);
+#endif
 #ifdef DEBUG_TB_INVALIDATE
         printf("protecting code page: 0x%08lx\n", 
                host_start);
 #endif
         p->flags &= ~PAGE_WRITE;
+#ifdef DEBUG_TB_CHECK
+        tb_page_check();
+#endif
     }
 }
@@ -483,6 +487,9 @@ void tb_link(TranslationBlock *tb)
     if (page_index2 != page_index1) {
         tb_alloc_page(tb, page_index2);
     }
+#ifdef DEBUG_TB_CHECK
+    tb_page_check();
+#endif
     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
     tb->jmp_next[0] = NULL;
     tb->jmp_next[1] = NULL;
@@ -517,20 +524,23 @@ int page_unprotect(unsigned long address)
     /* if the page was really writable, then we change its
        protection back to writable */
     if (prot & PAGE_WRITE_ORG) {
-        mprotect((void *)host_start, host_page_size, 
-                 (prot & PAGE_BITS) | PAGE_WRITE);
         pindex = (address - host_start) >> TARGET_PAGE_BITS;
-        p1[pindex].flags |= PAGE_WRITE;
-        /* and since the content will be modified, we must invalidate
-           the corresponding translated code. */
-        tb_invalidate_page(address);
+        if (!(p1[pindex].flags & PAGE_WRITE)) {
+#if !defined(CONFIG_SOFTMMU)
+            mprotect((void *)host_start, host_page_size, 
+                     (prot & PAGE_BITS) | PAGE_WRITE);
+#endif
+            p1[pindex].flags |= PAGE_WRITE;
+            /* and since the content will be modified, we must invalidate
+               the corresponding translated code. */
+            tb_invalidate_page(address);
 #ifdef DEBUG_TB_CHECK
-        tb_invalidate_check(address);
+            tb_invalidate_check(address);
 #endif
-        return 1;
-    } else {
-        return 0;
+            return 1;
+        }
     }
+    return 0;
 }
 
 /* call this function when system calls directly modify a memory area */
@@ -734,13 +744,17 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
 /* unmap all maped pages and flush all associated code */
 void page_unmap(void)
 {
-    PageDesc *p, *pmap;
-    unsigned long addr;
-    int i, j, ret, j1;
+    PageDesc *pmap;
+    int i;
 
     for(i = 0; i < L1_SIZE; i++) {
         pmap = l1_map[i];
         if (pmap) {
+#if !defined(CONFIG_SOFTMMU)
+            PageDesc *p;
+            unsigned long addr;
+            int j, ret, j1;
+
             p = pmap;
             for(j = 0;j < L2_SIZE;) {
                 if (p->flags & PAGE_VALID) {
@@ -763,6 +777,7 @@ void page_unmap(void)
                     j++;
                 }
             }
+#endif
             free(pmap);
             l1_map[i] = NULL;
         }
@@ -773,7 +788,7 @@ void page_unmap(void)
 void tlb_flush(CPUState *env)
 {
-#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
     int i;
     for(i = 0; i < CPU_TLB_SIZE; i++) {
         env->tlb_read[0][i].address = -1;
@@ -784,16 +799,38 @@ void tlb_flush(CPUState *env)
 #endif
 }
 
+static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
+{
+    if (addr == (tlb_entry->address & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
+        tlb_entry->address = -1;
+}
+
 void tlb_flush_page(CPUState *env, uint32_t addr)
 {
-#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
     int i;
 
     addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    env->tlb_read[0][i].address = -1;
-    env->tlb_write[0][i].address = -1;
-    env->tlb_read[1][i].address = -1;
-    env->tlb_write[1][i].address = -1;
+    tlb_flush_entry(&env->tlb_read[0][i], addr);
+    tlb_flush_entry(&env->tlb_write[0][i], addr);
+    tlb_flush_entry(&env->tlb_read[1][i], addr);
+    tlb_flush_entry(&env->tlb_write[1][i], addr);
+#endif
+}
+
+/* make all write to page 'addr' trigger a TLB exception to detect
+   self modifying code */
+void tlb_flush_page_write(CPUState *env, uint32_t addr)
+{
+#if !defined(CONFIG_USER_ONLY)
+    int i;
+
+    addr &= TARGET_PAGE_MASK;
+    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    tlb_flush_entry(&env->tlb_write[0][i], addr);
+    tlb_flush_entry(&env->tlb_write[1][i], addr);
 #endif
 }
@@ -900,3 +937,25 @@ int cpu_register_io_memory(int io_index,
     }
     return io_index << IO_MEM_SHIFT;
 }
+
+#if !defined(CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _cmmu
+#define GETPC() NULL
+#define env cpu_single_env
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+#undef env
+
+#endif
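For orientation: the tlb_read/tlb_write arrays manipulated above are indexed first by the privilege index (0 or 1) and then by the page index, and each slot pairs a tag with a host-address offset. The entry layout is defined in the CPU headers, not in this diff; the sketch below only reflects the two fields this code touches, and the exact field types are an assumption.

/* Sketch only - the real definition lives in the target CPU headers;
 * field types are approximate. */
typedef struct CPUTLBEntry {
    uint32_t address;       /* tagged guest virtual page address, or -1 if the entry is invalid */
    unsigned long addend;   /* added to the guest address to obtain the host address */
} CPUTLBEntry;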
hw/vga_template.h

@@ -354,7 +354,7 @@ static void glue(vga_draw_line15_, DEPTH)(VGAState *s1, uint8_t *d,
     w = width;
     do {
-        v = lduw((void *)s);
+        v = lduw_raw((void *)s);
         r = (v >> 7) & 0xf8;
         g = (v >> 2) & 0xf8;
         b = (v << 3) & 0xf8;
@@ -379,7 +379,7 @@ static void glue(vga_draw_line16_, DEPTH)(VGAState *s1, uint8_t *d,
     w = width;
     do {
-        v = lduw((void *)s);
+        v = lduw_raw((void *)s);
         r = (v >> 8) & 0xf8;
         g = (v >> 3) & 0xfc;
         b = (v << 3) & 0xf8;
...
softmmu_header.h

@@ -19,26 +19,48 @@
  */
 #if DATA_SIZE == 8
 #define SUFFIX q
+#define USUFFIX q
 #define DATA_TYPE uint64_t
 #elif DATA_SIZE == 4
 #define SUFFIX l
+#define USUFFIX l
 #define DATA_TYPE uint32_t
 #elif DATA_SIZE == 2
 #define SUFFIX w
+#define USUFFIX uw
 #define DATA_TYPE uint16_t
 #define DATA_STYPE int16_t
 #elif DATA_SIZE == 1
 #define SUFFIX b
+#define USUFFIX ub
 #define DATA_TYPE uint8_t
 #define DATA_STYPE int8_t
 #else
 #error unsupported data size
 #endif
 
-#if MEMUSER == 0
-#define MEMSUFFIX _kernel
+#if ACCESS_TYPE == 0
+
+#define CPU_MEM_INDEX 0
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == 1
+
+#define CPU_MEM_INDEX 1
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == 2
+
+#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == 3
+
+#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
+#define MMUSUFFIX _cmmu
+
 #else
-#define MEMSUFFIX _user
+#error invalid ACCESS_TYPE
 #endif
 
 #if DATA_SIZE == 8
@@ -48,24 +70,26 @@
 #endif
 
-#if MEMUSER == 0
-DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr);
-void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE v);
-#endif
+DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                         int is_user);
+void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, DATA_TYPE v, int is_user);
 
-static inline int glue(glue(ldu, SUFFIX), MEMSUFFIX)(void *ptr)
+static inline int glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
 {
     int index;
     RES_TYPE res;
     unsigned long addr, physaddr;
+    int is_user;
+
     addr = (unsigned long)ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    if (__builtin_expect(env->tlb_read[MEMUSER][index].address != 
+    is_user = CPU_MEM_INDEX;
+    if (__builtin_expect(env->tlb_read[is_user][index].address != 
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        res = glue(glue(__ld, SUFFIX), _mmu)(addr);
+        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
     } else {
-        physaddr = addr + env->tlb_read[MEMUSER][index].addend;
-        res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
+        physaddr = addr + env->tlb_read[is_user][index].addend;
+        res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
 }
@@ -75,13 +99,16 @@ static inline int glue(glue(lds, SUFFIX), MEMSUFFIX)(void *ptr)
 {
     int res, index;
     unsigned long addr, physaddr;
+    int is_user;
+
     addr = (unsigned long)ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    if (__builtin_expect(env->tlb_read[MEMUSER][index].address != 
+    is_user = CPU_MEM_INDEX;
+    if (__builtin_expect(env->tlb_read[is_user][index].address != 
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), _mmu)(addr);
+        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
     } else {
-        physaddr = addr + env->tlb_read[MEMUSER][index].addend;
+        physaddr = addr + env->tlb_read[is_user][index].addend;
         res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
@@ -92,13 +119,16 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v)
 {
     int index;
     unsigned long addr, physaddr;
+    int is_user;
+
     addr = (unsigned long)ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    if (__builtin_expect(env->tlb_write[MEMUSER][index].address != 
+    is_user = CPU_MEM_INDEX;
+    if (__builtin_expect(env->tlb_write[is_user][index].address != 
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        glue(glue(__st, SUFFIX), _mmu)(addr, v);
+        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user);
     } else {
-        physaddr = addr + env->tlb_write[MEMUSER][index].addend;
+        physaddr = addr + env->tlb_write[is_user][index].addend;
         glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
     }
 }
@@ -107,5 +137,7 @@ static inline void glue(glue(st, SUFFIX), MEMSUFFIX)(void *ptr, RES_TYPE v)
 #undef DATA_TYPE
 #undef DATA_STYPE
 #undef SUFFIX
+#undef USUFFIX
 #undef DATA_SIZE
-#undef MEMSUFFIX
+#undef CPU_MEM_INDEX
+#undef MMUSUFFIX
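As a concrete illustration of what this header generates, here is an approximate expansion for DATA_SIZE == 4 and ACCESS_TYPE == 0 (the _kernel accessors instantiated from target-i386/exec.h below). This is a sketch derived from the diff itself, not code from the patch; the __builtin_expect() hint is omitted and the surrounding QEMU declarations (env, TARGET_PAGE_BITS, CPU_TLB_SIZE, ldl_raw) are assumed.

/* Approximate expansion: fast path hits the per-CPU soft TLB,
 * slow path calls the out-of-line __ldl_mmu() handler. */
static inline int ldl_kernel(void *ptr)
{
    int index, is_user;
    int res;
    unsigned long addr, physaddr;

    addr = (unsigned long)ptr;
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    is_user = 0;                                    /* CPU_MEM_INDEX for ACCESS_TYPE 0 */
    if (env->tlb_read[is_user][index].address !=
        (addr & (TARGET_PAGE_MASK | (4 - 1)))) {
        res = __ldl_mmu(addr, is_user);             /* TLB miss: out-of-line handler */
    } else {
        physaddr = addr + env->tlb_read[is_user][index].addend;
        res = ldl_raw((uint8_t *)physaddr);         /* TLB hit: direct host access */
    }
    return res;
}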
softmmu_template.h

@@ -21,23 +21,31 @@
 #if DATA_SIZE == 8
 #define SUFFIX q
+#define USUFFIX q
 #define DATA_TYPE uint64_t
 #elif DATA_SIZE == 4
 #define SUFFIX l
+#define USUFFIX l
 #define DATA_TYPE uint32_t
 #elif DATA_SIZE == 2
 #define SUFFIX w
+#define USUFFIX uw
 #define DATA_TYPE uint16_t
 #elif DATA_SIZE == 1
 #define SUFFIX b
+#define USUFFIX ub
 #define DATA_TYPE uint8_t
 #else
 #error unsupported data size
 #endif
 
-static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr);
-static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
-                                  void *retaddr);
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr, 
+                                                        int is_user,
+                                                        void *retaddr);
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr, 
+                                                   DATA_TYPE val, 
+                                                   int is_user,
+                                                   void *retaddr);
 
 static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr, 
                                               unsigned long tlb_addr)
@@ -81,16 +89,16 @@ static inline void glue(io_write, SUFFIX)(unsigned long physaddr,
 }
 
 /* handle all cases except unaligned access which span two pages */
-DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
+DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                         int is_user)
 {
     DATA_TYPE res;
-    int is_user, index;
+    int index;
     unsigned long physaddr, tlb_addr;
     void *retaddr;
 
     /* test if there is match for unaligned or IO access */
     /* XXX: could done more in memory macro in a non portable way */
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -104,29 +112,31 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
         } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
-            retaddr = __builtin_return_address(0);
-            res = glue(slow_ld, SUFFIX)(addr, retaddr);
+            retaddr = GETPC();
+            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr, 
+                                                         is_user, retaddr);
         } else {
             /* unaligned access in the same page */
-            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = __builtin_return_address(0);
-        tlb_fill(addr, 0, retaddr);
+        retaddr = GETPC();
+        tlb_fill(addr, 0, is_user, retaddr);
         goto redo;
     }
     return res;
 }
 
 /* handle all unaligned cases */
-static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr, 
+                                                        int is_user,
+                                                        void *retaddr)
 {
     DATA_TYPE res, res1, res2;
-    int is_user, index, shift;
+    int index, shift;
     unsigned long physaddr, tlb_addr, addr1, addr2;
 
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -142,8 +152,10 @@ static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
             /* slow unaligned access (it spans two pages) */
             addr1 = addr & ~(DATA_SIZE - 1);
             addr2 = addr1 + DATA_SIZE;
-            res1 = glue(slow_ld, SUFFIX)(addr1, retaddr);
-            res2 = glue(slow_ld, SUFFIX)(addr2, retaddr);
+            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1, 
+                                                          is_user, retaddr);
+            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2, 
+                                                          is_user, retaddr);
             shift = (addr & (DATA_SIZE - 1)) * 8;
 #ifdef TARGET_WORDS_BIGENDIAN
             res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
@@ -152,24 +164,25 @@ static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
 #endif
         } else {
             /* unaligned/aligned access in the same page */
-            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
         }
     } else {
         /* the page is not in the TLB : fill it */
-        tlb_fill(addr, 0, retaddr);
+        tlb_fill(addr, 0, is_user, retaddr);
         goto redo;
     }
     return res;
 }
 
-void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val)
+void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, DATA_TYPE val,
+                                                    int is_user)
 {
     unsigned long physaddr, tlb_addr;
     void *retaddr;
-    int is_user, index;
+    int index;
 
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_write[is_user][index].address;
@@ -182,28 +195,30 @@ void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val
             glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
         } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
-            retaddr = __builtin_return_address(0);
-            glue(slow_st, SUFFIX)(addr, val, retaddr);
+            retaddr = GETPC();
+            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
+                                                   is_user, retaddr);
         } else {
             /* aligned/unaligned access in the same page */
             glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = __builtin_return_address(0);
-        tlb_fill(addr, 1, retaddr);
+        retaddr = GETPC();
+        tlb_fill(addr, 1, is_user, retaddr);
         goto redo;
     }
 }
 
 /* handles all unaligned cases */
-static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
-                                  void *retaddr)
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr, 
+                                                   DATA_TYPE val,
+                                                   int is_user,
+                                                   void *retaddr)
 {
     unsigned long physaddr, tlb_addr;
-    int is_user, index, i;
+    int index, i;
 
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_write[is_user][index].address;
@@ -219,9 +234,11 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
             /* XXX: not efficient, but simple */
             for(i = 0;i < DATA_SIZE; i++) {
 #ifdef TARGET_WORDS_BIGENDIAN
-                slow_stb(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), retaddr);
+                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), 
+                                          is_user, retaddr);
 #else
-                slow_stb(addr + i, val >> (i * 8), retaddr);
+                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8), 
+                                          is_user, retaddr);
 #endif
             }
         } else {
@@ -230,7 +247,7 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
         }
     } else {
         /* the page is not in the TLB : fill it */
-        tlb_fill(addr, 1, retaddr);
+        tlb_fill(addr, 1, is_user, retaddr);
         goto redo;
     }
 }
@@ -238,4 +255,5 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
 #undef SHIFT
 #undef DATA_TYPE
 #undef SUFFIX
+#undef USUFFIX
 #undef DATA_SIZE
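A small worked example of the unaligned slow path above: a 4-byte access whose page offset is 0xffd spans two pages, so the template issues two recursive aligned-ish loads (addr1 = addr & ~3, addr2 = addr1 + 4) and recombines them with a byte shift. The hunk only shows the big-endian recombination; the little-endian branch sits in the elided context, so the formula below is a presumed sketch, not quoted from the patch.

#include <stdint.h>

/* Illustrative only: recombining a 4-byte little-endian load that spans two
 * pages, the way slow_ld above does. res1/res2 are the values loaded at
 * addr1 and addr2; the formula is presumed from the elided #else branch. */
static uint32_t combine_le(uint32_t res1, uint32_t res2, unsigned long addr)
{
    int shift = (addr & 3) * 8;     /* misalignment within the 4-byte word */
    if (shift == 0)
        return res1;                /* actually aligned: nothing to combine */
    return (res1 >> shift) | (res2 << (32 - shift));
}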
target-i386/exec.h

@@ -137,8 +137,10 @@ void helper_invlpg(unsigned int addr);
 void cpu_x86_update_cr0(CPUX86State *env);
 void cpu_x86_update_cr3(CPUX86State *env);
 void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write);
-void tlb_fill(unsigned long addr, int is_write, void *retaddr);
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, 
+                             int is_write, int is_user, int is_softmmu);
+void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr);
 void __hidden cpu_lock(void);
 void __hidden cpu_unlock(void);
 void do_interrupt(int intno, int is_int, int error_code, 
@@ -366,26 +368,30 @@ static inline void load_eflags(int eflags, int update_mask)
         (eflags & update_mask);
 }
 
-/* memory access macros */
+/* XXX: move that to a generic header */
+#if !defined(CONFIG_USER_ONLY)
 
-#define ldul ldl
-#define lduq ldq
 #define ldul_user ldl_user
 #define ldul_kernel ldl_kernel
 
-#define ldub_raw ldub
-#define ldsb_raw ldsb
-#define lduw_raw lduw
-#define ldsw_raw ldsw
-#define ldl_raw ldl
-#define ldq_raw ldq
+#define ACCESS_TYPE 0
+#define MEMSUFFIX _kernel
+#define DATA_SIZE 1
+#include "softmmu_header.h"
 
-#define stb_raw stb
-#define stw_raw stw
-#define stl_raw stl
-#define stq_raw stq
+#define DATA_SIZE 2
+#include "softmmu_header.h"
 
-#define MEMUSER 0
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ACCESS_TYPE 1
+#define MEMSUFFIX _user
 #define DATA_SIZE 1
 #include "softmmu_header.h"
@@ -397,9 +403,12 @@ static inline void load_eflags(int eflags, int update_mask)
 #define DATA_SIZE 8
 #include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
 
-#undef MEMUSER
-
 /* these access are slower, they must be as rare as possible */
-#define MEMUSER 1
+#define ACCESS_TYPE 2
+#define MEMSUFFIX _data
 #define DATA_SIZE 1
 #include "softmmu_header.h"
@@ -411,6 +420,59 @@ static inline void load_eflags(int eflags, int update_mask)
 #define DATA_SIZE 8
 #include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
 
-#undef MEMUSER
+#define ldub(p) ldub_data(p)
+#define ldsb(p) ldsb_data(p)
+#define lduw(p) lduw_data(p)
+#define ldsw(p) ldsw_data(p)
+#define ldl(p) ldl_data(p)
+#define ldq(p) ldq_data(p)
+
+#define stb(p, v) stb_data(p, v)
+#define stw(p, v) stw_data(p, v)
+#define stl(p, v) stl_data(p, v)
+#define stq(p, v) stq_data(p, v)
+
+static inline double ldfq(void *ptr)
+{
+    union {
+        double d;
+        uint64_t i;
+    } u;
+    u.i = ldq(ptr);
+    return u.d;
+}
+
+static inline void stfq(void *ptr, double v)
+{
+    union {
+        double d;
+        uint64_t i;
+    } u;
+    u.d = v;
+    stq(ptr, u.i);
+}
+
+static inline float ldfl(void *ptr)
+{
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.i = ldl(ptr);
+    return u.f;
+}
+
+static inline void stfl(void *ptr, float v)
+{
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.f = v;
+    stl(ptr, u.i);
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
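The net effect of these exec.h changes for target code, summarized as a sketch (names taken from the diff, the chain itself is an interpretation): generic ldl()/stl() now resolve to the _data accessors, which pick the soft-TLB half from the current privilege level rather than from a compile-time MEMUSER constant.

/* Sketch of the resulting call chain in softmmu builds:
 *   ldl(p)                                -> ldl_data(p)
 *   ldl_data() is generated by softmmu_header.h with ACCESS_TYPE 2, so
 *   is_user = ((env->hflags & HF_CPL_MASK) == 3);
 *   fast path : env->tlb_read[is_user][index] hit, ldl_raw() on host memory
 *   miss path : __ldl_mmu(addr, is_user), which may call tlb_fill()
 */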
target-i386/helper.c

@@ -153,11 +153,11 @@ static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
     if (index + (4 << shift) - 1 > env->tr.limit)
         raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
     if (shift == 0) {
-        *esp_ptr = lduw(env->tr.base + index);
-        *ss_ptr = lduw(env->tr.base + index + 2);
+        *esp_ptr = lduw_kernel(env->tr.base + index);
+        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
     } else {
-        *esp_ptr = ldl(env->tr.base + index);
-        *ss_ptr = lduw(env->tr.base + index + 4);
+        *esp_ptr = ldl_kernel(env->tr.base + index);
+        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
     }
 }
@@ -177,8 +177,8 @@ static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
     if ((index + 7) > dt->limit)
         return -1;
     ptr = dt->base + index;
-    *e1_ptr = ldl(ptr);
-    *e2_ptr = ldl(ptr + 4);
+    *e1_ptr = ldl_kernel(ptr);
+    *e2_ptr = ldl_kernel(ptr + 4);
     return 0;
 }
@@ -226,8 +226,8 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     if (intno * 8 + 7 > dt->limit)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     ptr = dt->base + intno * 8;
-    e1 = ldl(ptr);
-    e2 = ldl(ptr + 4);
+    e1 = ldl_kernel(ptr);
+    e2 = ldl_kernel(ptr + 4);
     /* check gate type */
     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
     switch(type) {
@@ -344,47 +344,47 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         int old_eflags;
         if (env->eflags & VM_MASK) {
             ssp -= 4;
-            stl(ssp, env->segs[R_GS].selector);
+            stl_kernel(ssp, env->segs[R_GS].selector);
             ssp -= 4;
-            stl(ssp, env->segs[R_FS].selector);
+            stl_kernel(ssp, env->segs[R_FS].selector);
             ssp -= 4;
-            stl(ssp, env->segs[R_DS].selector);
+            stl_kernel(ssp, env->segs[R_DS].selector);
             ssp -= 4;
-            stl(ssp, env->segs[R_ES].selector);
+            stl_kernel(ssp, env->segs[R_ES].selector);
         }
         if (new_stack) {
             ssp -= 4;
-            stl(ssp, old_ss);
+            stl_kernel(ssp, old_ss);
             ssp -= 4;
-            stl(ssp, old_esp);
+            stl_kernel(ssp, old_esp);
         }
         ssp -= 4;
         old_eflags = compute_eflags();
-        stl(ssp, old_eflags);
+        stl_kernel(ssp, old_eflags);
         ssp -= 4;
-        stl(ssp, old_cs);
+        stl_kernel(ssp, old_cs);
         ssp -= 4;
-        stl(ssp, old_eip);
+        stl_kernel(ssp, old_eip);
         if (has_error_code) {
             ssp -= 4;
-            stl(ssp, error_code);
+            stl_kernel(ssp, error_code);
         }
     } else {
         if (new_stack) {
             ssp -= 2;
-            stw(ssp, old_ss);
+            stw_kernel(ssp, old_ss);
             ssp -= 2;
-            stw(ssp, old_esp);
+            stw_kernel(ssp, old_esp);
         }
         ssp -= 2;
-        stw(ssp, compute_eflags());
+        stw_kernel(ssp, compute_eflags());
         ssp -= 2;
-        stw(ssp, old_cs);
+        stw_kernel(ssp, old_cs);
         ssp -= 2;
-        stw(ssp, old_eip);
+        stw_kernel(ssp, old_eip);
         if (has_error_code) {
             ssp -= 2;
-            stw(ssp, error_code);
+            stw_kernel(ssp, error_code);
         }
     }
@@ -410,8 +410,8 @@ static void do_interrupt_real(int intno, int is_int, int error_code,
     if (intno * 4 + 3 > dt->limit)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     ptr = dt->base + intno * 4;
-    offset = lduw(ptr);
-    selector = lduw(ptr + 2);
+    offset = lduw_kernel(ptr);
+    selector = lduw_kernel(ptr + 2);
     esp = ESP;
     ssp = env->segs[R_SS].base;
     if (is_int)
@@ -420,11 +420,11 @@ static void do_interrupt_real(int intno, int is_int, int error_code,
     old_eip = env->eip;
     old_cs = env->segs[R_CS].selector;
     esp -= 2;
-    stw(ssp + (esp & 0xffff), compute_eflags());
+    stw_kernel(ssp + (esp & 0xffff), compute_eflags());
     esp -= 2;
-    stw(ssp + (esp & 0xffff), old_cs);
+    stw_kernel(ssp + (esp & 0xffff), old_cs);
     esp -= 2;
-    stw(ssp + (esp & 0xffff), old_eip);
+    stw_kernel(ssp + (esp & 0xffff), old_eip);
 
     /* update processor state */
     ESP = (ESP & ~0xffff) | (esp & 0xffff);
@@ -445,7 +445,7 @@ void do_interrupt_user(int intno, int is_int, int error_code,
     dt = &env->idt;
     ptr = dt->base + (intno * 8);
-    e2 = ldl(ptr + 4);
+    e2 = ldl_kernel(ptr + 4);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
     cpl = env->hflags & HF_CPL_MASK;
@@ -651,8 +651,8 @@ void helper_lldt_T0(void)
         if ((index + 7) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         ptr = dt->base + index;
-        e1 = ldl(ptr);
-        e2 = ldl(ptr + 4);
+        e1 = ldl_kernel(ptr);
+        e2 = ldl_kernel(ptr + 4);
         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         if (!(e2 & DESC_P_MASK))
@@ -684,8 +684,8 @@ void helper_ltr_T0(void)
         if ((index + 7) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         ptr = dt->base + index;
-        e1 = ldl(ptr);
-        e2 = ldl(ptr + 4);
+        e1 = ldl_kernel(ptr);
+        e2 = ldl_kernel(ptr + 4);
         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
         if ((e2 & DESC_S_MASK) || 
             (type != 2 && type != 9))
@@ -694,7 +694,7 @@ void helper_ltr_T0(void)
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
         load_seg_cache_raw_dt(&env->tr, e1, e2);
         e2 |= 0x00000200; /* set the busy bit */
-        stl(ptr + 4, e2);
+        stl_kernel(ptr + 4, e2);
     }
     env->tr.selector = selector;
 }
@@ -813,14 +813,14 @@ void helper_lcall_real_T0_T1(int shift, int next_eip)
     ssp = env->segs[R_SS].base;
     if (shift) {
         esp -= 4;
-        stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        stl_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
         esp -= 4;
-        stl(ssp + (esp & esp_mask), next_eip);
+        stl_kernel(ssp + (esp & esp_mask), next_eip);
     } else {
         esp -= 2;
-        stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
         esp -= 2;
-        stw(ssp + (esp & esp_mask), next_eip);
+        stw_kernel(ssp + (esp & esp_mask), next_eip);
     }
 
     if (!(env->segs[R_SS].flags & DESC_B_MASK))
@@ -873,14 +873,14 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         ssp = env->segs[R_SS].base + sp;
         if (shift) {
             ssp -= 4;
-            stl(ssp, env->segs[R_CS].selector);
+            stl_kernel(ssp, env->segs[R_CS].selector);
             ssp -= 4;
-            stl(ssp, next_eip);
+            stl_kernel(ssp, next_eip);
         } else {
             ssp -= 2;
-            stw(ssp, env->segs[R_CS].selector);
+            stw_kernel(ssp, env->segs[R_CS].selector);
             ssp -= 2;
-            stw(ssp, next_eip);
+            stw_kernel(ssp, next_eip);
         }
         sp -= (4 << shift);
@@ -975,23 +975,23 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         ssp = env->segs[R_SS].base + sp;
         if (shift) {
             ssp -= 4;
-            stl(ssp, old_ss);
+            stl_kernel(ssp, old_ss);
             ssp -= 4;
-            stl(ssp, old_esp);
+            stl_kernel(ssp, old_esp);
             ssp -= 4 * param_count;
             for(i = 0; i < param_count; i++) {
-                val = ldl(old_ssp + i * 4);
-                stl(ssp + i * 4, val);
+                val = ldl_kernel(old_ssp + i * 4);
+                stl_kernel(ssp + i * 4, val);
             }
         } else {
             ssp -= 2;
-            stw(ssp, old_ss);
+            stw_kernel(ssp, old_ss);
             ssp -= 2;
-            stw(ssp, old_esp);
+            stw_kernel(ssp, old_esp);
             ssp -= 2 * param_count;
             for(i = 0; i < param_count; i++) {
-                val = lduw(old_ssp + i * 2);
-                stw(ssp + i * 2, val);
+                val = lduw_kernel(old_ssp + i * 2);
+                stw_kernel(ssp + i * 2, val);
             }
         }
     } else {
@@ -1004,14 +1004,14 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         if (shift) {
             ssp -= 4;
-            stl(ssp, env->segs[R_CS].selector);
+            stl_kernel(ssp, env->segs[R_CS].selector);
             ssp -= 4;
-            stl(ssp, next_eip);
+            stl_kernel(ssp, next_eip);
         } else {
             ssp -= 2;
-            stw(ssp, env->segs[R_CS].selector);
+            stw_kernel(ssp, env->segs[R_CS].selector);
             ssp -= 2;
-            stw(ssp, next_eip);
+            stw_kernel(ssp, next_eip);
         }
         sp -= push_size;
@@ -1042,14 +1042,14 @@ void helper_iret_real(int shift)
     ssp = env->segs[R_SS].base + sp;
     if (shift == 1) {
         /* 32 bits */
-        new_eflags = ldl(ssp + 8);
-        new_cs = ldl(ssp + 4) & 0xffff;
-        new_eip = ldl(ssp) & 0xffff;
+        new_eflags = ldl_kernel(ssp + 8);
+        new_cs = ldl_kernel(ssp + 4) & 0xffff;
+        new_eip = ldl_kernel(ssp) & 0xffff;
     } else {
         /* 16 bits */
-        new_eflags = lduw(ssp + 4);
-        new_cs = lduw(ssp + 2);
-        new_eip = lduw(ssp);
+        new_eflags = lduw_kernel(ssp + 4);
+        new_cs = lduw_kernel(ssp + 2);
+        new_eip = lduw_kernel(ssp);
     }
     new_esp = sp + (6 << shift);
     ESP = (ESP & 0xffff0000) | 
@@ -1078,17 +1078,17 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     if (shift == 1) {
         /* 32 bits */
         if (is_iret)
-            new_eflags = ldl(ssp + 8);
-        new_cs = ldl(ssp + 4) & 0xffff;
-        new_eip = ldl(ssp);
+            new_eflags = ldl_kernel(ssp + 8);
+        new_cs = ldl_kernel(ssp + 4) & 0xffff;
+        new_eip = ldl_kernel(ssp);
         if (is_iret && (new_eflags & VM_MASK))
             goto return_to_vm86;
     } else {
         /* 16 bits */
         if (is_iret)
-            new_eflags = lduw(ssp + 4);
-        new_cs = lduw(ssp + 2);
-        new_eip = lduw(ssp);
+            new_eflags = lduw_kernel(ssp + 4);
+        new_cs = lduw_kernel(ssp + 2);
+        new_eip = lduw_kernel(ssp);
     }
     if ((new_cs & 0xfffc) == 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1124,12 +1124,12 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
         ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
         if (shift == 1) {
             /* 32 bits */
-            new_esp = ldl(ssp);
-            new_ss = ldl(ssp + 4) & 0xffff;
+            new_esp = ldl_kernel(ssp);
+            new_ss = ldl_kernel(ssp + 4) & 0xffff;
         } else {
             /* 16 bits */
-            new_esp = lduw(ssp);
-            new_ss = lduw(ssp + 2);
+            new_esp = lduw_kernel(ssp);
+            new_ss = lduw_kernel(ssp + 2);
         }
         if ((new_ss & 3) != rpl)
@@ -1175,12 +1175,12 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     return;
 
  return_to_vm86:
-    new_esp = ldl(ssp + 12);
-    new_ss = ldl(ssp + 16);
-    new_es = ldl(ssp + 20);
-    new_ds = ldl(ssp + 24);
-    new_fs = ldl(ssp + 28);
-    new_gs = ldl(ssp + 32);
+    new_esp = ldl_kernel(ssp + 12);
+    new_ss = ldl_kernel(ssp + 16);
+    new_es = ldl_kernel(ssp + 20);
+    new_ds = ldl_kernel(ssp + 24);
+    new_fs = ldl_kernel(ssp + 28);
+    new_gs = ldl_kernel(ssp + 32);
 
     /* modify processor state */
     load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
@@ -1770,6 +1770,11 @@ void helper_frstor(uint8_t *ptr, int data32)
     }
 }
 
+#if !defined(CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _mmu
+#define GETPC() (__builtin_return_address(0))
+
 #define SHIFT 0
 #include "softmmu_template.h"
@@ -1782,22 +1787,41 @@ void helper_frstor(uint8_t *ptr, int data32)
 #define SHIFT 3
 #include "softmmu_template.h"
 
-/* try to fill the TLB and return an exception if error */
-void tlb_fill(unsigned long addr, int is_write, void *retaddr)
+#endif
+
+/* try to fill the TLB and return an exception if error. If retaddr is
+   NULL, it means that the function was called in C code (i.e. not
+   from generated code or from helper.c) */
+/* XXX: fix it to restore all registers */
+void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
 {
     TranslationBlock *tb;
     int ret;
     unsigned long pc;
-    ret = cpu_x86_handle_mmu_fault(env, addr, is_write);
+    CPUX86State *saved_env;
+
+    /* XXX: hack to restore env in all cases, even if not called from
+       generated code */
+    saved_env = env;
+    env = cpu_single_env;
+    if (is_write && page_unprotect(addr)) {
+        /* nothing more to do: the page was write protected because
+           there was code in it. page_unprotect() flushed the code. */
+    }
+
+    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
     if (ret) {
-        /* now we have a real cpu fault */
-        pc = (unsigned long)retaddr;
-        tb = tb_find_pc(pc);
-        if (tb) {
-            /* the PC is inside the translated code. It means that we have
-               a virtual CPU fault */
-            cpu_restore_state(tb, env, pc);
+        if (retaddr) {
+            /* now we have a real cpu fault */
+            pc = (unsigned long)retaddr;
+            tb = tb_find_pc(pc);
+            if (tb) {
+                /* the PC is inside the translated code. It means that we have
+                   a virtual CPU fault */
+                cpu_restore_state(tb, env, pc);
+            }
         }
         raise_exception_err(EXCP0E_PAGE, env->error_code);
     }
+    env = saved_env;
 }
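Worth noting, as a reading aid rather than new material: the same softmmu_template.h is instantiated twice in this commit, with different GETPC() and MMUSUFFIX definitions, and both definitions are visible in the hunks above.

/* target-i386/helper.c:  #define MMUSUFFIX _mmu
 *                        #define GETPC() (__builtin_return_address(0))
 *   -> __ldl_mmu etc., called from generated code; tlb_fill() uses the
 *      return address to find the TB and restore the CPU state.
 *
 * exec.c:                #define MMUSUFFIX _cmmu
 *                        #define GETPC() NULL
 *                        #define env cpu_single_env
 *   -> __ldl_cmmu etc., used from C code; a NULL retaddr tells tlb_fill()
 *      there is no translated-code state to restore.
 */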
target-i386/helper2.c
浏览文件 @
61382a50
...
@@ -210,7 +210,9 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
    flags = page_get_flags(addr);
    if (flags & PAGE_VALID) {
        virt_addr = addr & ~0xfff;
+#if !defined(CONFIG_SOFTMMU)
        munmap((void *)virt_addr, 4096);
+#endif
        page_set_flags(virt_addr, virt_addr + 4096, 0);
    }
}
...
@@ -221,16 +223,14 @@ void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write, int is_user, int is_softmmu)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, virt_addr;
-    int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
+    int error_code, is_dirty, prot, page_size, ret;
    unsigned long pd;

-    cpl = env->hflags & HF_CPL_MASK;
-    is_user = (cpl == 3);
#ifdef DEBUG_MMU
    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
           addr, is_write, is_user, env->eip);
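The return-value contract quoted in the comment above (1 = generate PF fault, 2 = soft MMU activation required for this block, with 0 meaning a usable mapping is now in place) is what tlb_fill() in target-i386/helper.c relies on. Below is a hedged sketch of a caller honouring that contract; the wrapper is hypothetical, while the functions and constants it uses are the ones appearing elsewhere in this patch, so it only makes sense inside the target-i386 sources.

/* hypothetical caller, for illustration only */
static void demo_mmu_fault(CPUX86State *env, uint32_t addr,
                           int is_write, int is_user)
{
    int ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);

    if (ret == 1) {
        /* the walker stored the page-fault error code in env->error_code */
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    } else if (ret == 2) {
        /* only possible in the non-CONFIG_SOFTMMU build: the current block
           must be retranslated with software MMU accesses enabled */
    }
    /* ret == 0: a mapping (TLB entry or mmap) is in place, just retry */
}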
...
@@ -252,7 +252,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
    /* page directory entry */
    pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
-    pde = ldl(pde_ptr);
+    pde = ldl_raw(pde_ptr);
    if (!(pde & PG_PRESENT_MASK)) {
        error_code = 0;
        goto do_fault;
...
@@ -274,7 +274,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
            pde |= PG_ACCESSED_MASK;
            if (is_dirty)
                pde |= PG_DIRTY_MASK;
-            stl(pde_ptr, pde);
+            stl_raw(pde_ptr, pde);
        }
        pte = pde & ~0x003ff000; /* align to 4MB */
...
@@ -283,12 +283,12 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
    } else {
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
-            stl(pde_ptr, pde);
+            stl_raw(pde_ptr, pde);
        }

        /* page directory entry */
        pte_ptr = phys_ram_base + ((pde & ~0xfff) + ((addr >> 10) & 0xffc));
-        pte = ldl(pte_ptr);
+        pte = ldl_raw(pte_ptr);
        if (!(pte & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
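The pde_ptr/pte_ptr arithmetic in these hunks is the classic two-level i386 walk written as byte offsets into the page directory and page table: ((addr >> 20) & ~3) equals (addr >> 22) * 4, and ((addr >> 10) & 0xffc) equals ((addr >> 12) & 0x3ff) * 4. A small self-contained check of that equivalence (assumes 32-bit paging with 4 KB pages and no PSE):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint32_t addr = 0x12345678;                /* any guest virtual address */

    uint32_t pde_index = addr >> 22;           /* top 10 bits */
    uint32_t pte_index = (addr >> 12) & 0x3ff; /* next 10 bits */

    /* byte-offset forms used by cpu_x86_handle_mmu_fault() */
    uint32_t pde_off = (addr >> 20) & ~3;
    uint32_t pte_off = (addr >> 10) & 0xffc;

    assert(pde_off == pde_index * 4);          /* 4 bytes per PDE */
    assert(pte_off == pte_index * 4);          /* 4 bytes per PTE */
    return 0;
}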
...
@@ -308,7 +308,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
            pte |= PG_ACCESSED_MASK;
            if (is_dirty)
                pte |= PG_DIRTY_MASK;
-            stl(pte_ptr, pte);
+            stl_raw(pte_ptr, pte);
        }
        page_size = 4096;
        virt_addr = addr & ~0xfff;
...
@@ -325,7 +325,10 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
    }

 do_mapping:
-    if (env->hflags & HF_SOFTMMU_MASK) {
+#if !defined(CONFIG_SOFTMMU)
+    if (is_softmmu)
+#endif
+    {
        unsigned long paddr, vaddr, address, addend, page_offset;
        int index;
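The #if !defined(CONFIG_SOFTMMU) guard here is doing real work: in a full-softmmu build the is_softmmu test compiles away and the TLB-fill block always runs, while the qemu-fast style build keeps the test and can still fall back to the mmap path in the next hunk. A minimal self-contained illustration of the trick, where FULL_SOFTMMU stands in for CONFIG_SOFTMMU and everything else is made up for the demo:

#include <stdio.h>

static int tlb_filled;

static void do_mapping_sketch(int is_softmmu)
{
#if !defined(FULL_SOFTMMU)
    if (is_softmmu)
#endif
    {
        tlb_filled = 1;    /* stands in for "fill the software TLB entry" */
    }
}

int main(void)
{
    do_mapping_sketch(0);
    /* prints 0 when built normally, 1 when built with -DFULL_SOFTMMU */
    printf("TLB filled: %d\n", tlb_filled);
    return 0;
}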
...
@@ -352,32 +355,39 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
        env->tlb_write[is_user][index].address = address;
        env->tlb_write[is_user][index].addend = addend;
-        page_set_flags(vaddr, vaddr + TARGET_PAGE_SIZE,
-                       PAGE_VALID | PAGE_EXEC | prot);
+        ret = 0;
    }
-    ret = 0;
-    /* XXX: incorrect for 4MB pages */
-    pd = physpage_find(pte & ~0xfff);
-    if ((pd & 0xfff) != 0) {
-        /* IO access: no mapping is done as it will be handled by the
-           soft MMU */
-        if (!(env->hflags & HF_SOFTMMU_MASK))
-            ret = 2;
-    } else {
-        void *map_addr;
-        map_addr = mmap((void *)virt_addr, page_size, prot,
-                        MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
-        if (map_addr == MAP_FAILED) {
-            fprintf(stderr,
-                    "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
-                    pte & ~0xfff, virt_addr);
-            exit(1);
-        }
+#if !defined(CONFIG_SOFTMMU)
+    else {
+        ret = 0;
+        /* XXX: incorrect for 4MB pages */
+        pd = physpage_find(pte & ~0xfff);
+        if ((pd & 0xfff) != 0) {
+            /* IO access: no mapping is done as it will be handled by the
+               soft MMU */
+            if (!(env->hflags & HF_SOFTMMU_MASK))
+                ret = 2;
+        } else {
+            void *map_addr;
+            map_addr = mmap((void *)virt_addr, page_size, prot,
+                            MAP_SHARED | MAP_FIXED, phys_ram_fd, pd);
+            if (map_addr == MAP_FAILED) {
+                fprintf(stderr,
+                        "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
+                        pte & ~0xfff, virt_addr);
+                exit(1);
+            }
#ifdef DEBUG_MMU
            printf("mmaping 0x%08x to virt 0x%08x pse=%d\n",
                   pte & ~0xfff, virt_addr, (page_size != 4096));
#endif
            page_set_flags(virt_addr, virt_addr + page_size,
                           PAGE_VALID | PAGE_EXEC | prot);
        }
+    }
+#endif
    return ret;

 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
...
target-i386/op.c
...
@@ -376,14 +376,16 @@ void OPPROTO op_andl_A0_ffff(void)

/* memory access */
-#define MEMSUFFIX
+#define MEMSUFFIX _raw
#include "ops_mem.h"

+#if !defined(CONFIG_USER_ONLY)
#define MEMSUFFIX _user
#include "ops_mem.h"

#define MEMSUFFIX _kernel
#include "ops_mem.h"
+#endif

/* used for bit operations */
...
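ops_mem.h is pulled in once per MEMSUFFIX so that the same micro-op bodies are emitted as _raw, _user and _kernel variants, the latter two only when softmmu support is compiled in (i.e. not CONFIG_USER_ONLY). The header itself is not shown in this commit view; the snippet below is a self-contained illustration of the multiple-inclusion, token-pasting pattern such template headers rely on, not its actual contents.

#include <stdio.h>

/* two-level paste so MEMSUFFIX is expanded before being glued on */
#define GLUE_(a, b) a##b
#define GLUE(a, b)  GLUE_(a, b)

/* pretend this macro is the body of a template header such as ops_mem.h */
#define DEFINE_LOAD_OP \
    static int GLUE(load_u8, MEMSUFFIX)(const unsigned char *p) { return *p; }

#define MEMSUFFIX _raw
DEFINE_LOAD_OP            /* defines load_u8_raw() */
#undef MEMSUFFIX

#define MEMSUFFIX _user
DEFINE_LOAD_OP            /* defines load_u8_user() */
#undef MEMSUFFIX

int main(void)
{
    unsigned char b = 42;
    printf("%d %d\n", load_u8_raw(&b), load_u8_user(&b));
    return 0;
}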
target-i386/translate.c
(This diff is collapsed and not shown in this view.)