Project: int / Rust
Commit 7c5ea621
Authored Mar 05, 2016 by Scott Olson

Move memory module to its own file.

Parent: a29a6e0d
Showing 3 changed files, with 163 additions and 164 deletions:

    src/interpreter.rs   +2    -164
    src/lib.rs           +1    -0
    src/memory.rs        +160  -0
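In outline (paths as they appear in the diff):

    Before:
        src/lib.rs            declares `pub mod interpreter;`
        src/interpreter.rs    contains an inline `mod memory { ... }`

    After:
        src/lib.rs            additionally declares `mod memory;`
        src/interpreter.rs    imports the module instead of defining it
        src/memory.rs         new file holding the module body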
src/interpreter.rs

@@ -16,171 +16,9 @@
 use syntax::ast::Attribute;
 use syntax::attr::AttrMetaMethods;
 
+use memory::{self, Pointer, Repr, Allocation};
+
 const TRACE_EXECUTION: bool = true;
 
-mod memory {
-    use byteorder;
-    use byteorder::ByteOrder;
-    use rustc::middle::ty;
-    use std::collections::HashMap;
-    use std::mem;
-    use std::ops::Add;
-    use std::ptr;
-
-    use super::{EvalError, EvalResult};
-
-    [... the remaining ~150 lines of the module body move verbatim to src/memory.rs; see the new file below ...]
-}
-
-use self::memory::{Pointer, Repr, Allocation};
-
 #[derive(Clone, Debug)]
 pub enum EvalError {
src/lib.rs

@@ -6,3 +6,4 @@
 extern crate syntax;
 
 pub mod interpreter;
+mod memory;
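The only change to the moved code itself is how it names the interpreter's error types: nested inside `interpreter`, the module used `use super::{EvalError, EvalResult};`, while as a top-level module it uses `use interpreter::{EvalError, EvalResult};` (and interpreter.rs correspondingly switches from `use self::memory::...` to `use memory::...`). Below is a minimal, self-contained sketch of that path change, using stub definitions rather than the crate's real ones (2015-edition paths, matching the era of this code):

    // Sketch only: these EvalError/EvalResult stubs stand in for the
    // real definitions in interpreter.rs.
    pub mod interpreter {
        #[derive(Clone, Debug)]
        pub enum EvalError { DanglingPointerDeref, PointerOutOfBounds }
        pub type EvalResult<T> = Result<T, EvalError>;

        // Sibling module, named by its crate path; when `memory` was
        // nested here, this read `use self::memory::Pointer;`.
        pub use memory::Pointer;
    }

    mod memory {
        // Before the move this line read `use super::{EvalError, EvalResult};`.
        use interpreter::{EvalError, EvalResult};

        pub struct Pointer { pub offset: usize }

        pub fn check(ptr: &Pointer, len: usize) -> EvalResult<()> {
            if ptr.offset >= len {
                return Err(EvalError::PointerOutOfBounds);
            }
            Ok(())
        }
    }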
src/memory.rs (new file, mode 100644)

use byteorder;
use byteorder::ByteOrder;
use rustc::middle::ty;
use std::collections::HashMap;
use std::mem;
use std::ptr;

use interpreter::{EvalError, EvalResult};

pub struct Memory {
    next_id: u64,
    alloc_map: HashMap<u64, Allocation>,
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct AllocId(u64);

// TODO(tsion): Shouldn't clone Allocation. (Audit the rest of the code.)
#[derive(Clone, Debug)]
pub struct Allocation {
    pub bytes: Vec<u8>,
    // TODO(tsion): relocations
    // TODO(tsion): undef mask
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Pointer {
    pub alloc_id: AllocId,
    pub offset: usize,
    pub repr: Repr,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct FieldRepr {
    pub offset: usize,
    pub repr: Repr,
}

#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Repr {
    Int,
    Aggregate {
        size: usize,
        fields: Vec<FieldRepr>,
    },
}

impl Memory {
    pub fn new() -> Self {
        Memory { next_id: 0, alloc_map: HashMap::new() }
    }

    pub fn allocate_raw(&mut self, size: usize) -> AllocId {
        let id = AllocId(self.next_id);
        let alloc = Allocation { bytes: vec![0; size] };
        self.alloc_map.insert(self.next_id, alloc);
        self.next_id += 1;
        id
    }

    pub fn allocate(&mut self, repr: Repr) -> Pointer {
        Pointer {
            alloc_id: self.allocate_raw(repr.size()),
            offset: 0,
            repr: repr,
        }
    }

    pub fn get(&self, id: AllocId) -> EvalResult<&Allocation> {
        self.alloc_map.get(&id.0).ok_or(EvalError::DanglingPointerDeref)
    }

    pub fn get_mut(&mut self, id: AllocId) -> EvalResult<&mut Allocation> {
        self.alloc_map.get_mut(&id.0).ok_or(EvalError::DanglingPointerDeref)
    }

    fn get_bytes(&self, ptr: &Pointer, size: usize) -> EvalResult<&[u8]> {
        let alloc = try!(self.get(ptr.alloc_id));
        try!(alloc.check_bytes(ptr.offset, ptr.offset + size));
        Ok(&alloc.bytes[ptr.offset..ptr.offset + size])
    }

    fn get_bytes_mut(&mut self, ptr: &Pointer, size: usize) -> EvalResult<&mut [u8]> {
        let alloc = try!(self.get_mut(ptr.alloc_id));
        try!(alloc.check_bytes(ptr.offset, ptr.offset + size));
        Ok(&mut alloc.bytes[ptr.offset..ptr.offset + size])
    }

    pub fn copy(&mut self, src: &Pointer, dest: &Pointer, size: usize) -> EvalResult<()> {
        let src_bytes = try!(self.get_bytes_mut(src, size)).as_mut_ptr();
        let dest_bytes = try!(self.get_bytes_mut(dest, size)).as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                ptr::copy(src_bytes, dest_bytes, size);
            } else {
                ptr::copy_nonoverlapping(src_bytes, dest_bytes, size);
            }
        }

        Ok(())
    }

    pub fn read_int(&self, ptr: &Pointer) -> EvalResult<i64> {
        let bytes = try!(self.get_bytes(ptr, Repr::Int.size()));
        Ok(byteorder::NativeEndian::read_i64(bytes))
    }

    pub fn write_int(&mut self, ptr: &Pointer, n: i64) -> EvalResult<()> {
        let bytes = try!(self.get_bytes_mut(ptr, Repr::Int.size()));
        Ok(byteorder::NativeEndian::write_i64(bytes, n))
    }
}

impl Allocation {
    fn check_bytes(&self, start: usize, end: usize) -> EvalResult<()> {
        if start >= self.bytes.len() || end > self.bytes.len() {
            return Err(EvalError::PointerOutOfBounds);
        }
        Ok(())
    }
}

impl Pointer {
    pub fn offset(&self, i: usize) -> Self {
        Pointer { offset: self.offset + i, ..self.clone() }
    }
}

impl Repr {
    // TODO(tsion): Cache these outputs.
    pub fn from_ty(ty: ty::Ty) -> Self {
        match ty.sty {
            ty::TyInt(_) => Repr::Int,

            ty::TyTuple(ref fields) => {
                let mut size = 0;
                let fields = fields.iter().map(|ty| {
                    let repr = Repr::from_ty(ty);
                    let old_size = size;
                    size += repr.size();
                    FieldRepr { offset: old_size, repr: repr }
                }).collect();
                Repr::Aggregate { size: size, fields: fields }
            },

            _ => unimplemented!(),
        }
    }

    pub fn size(&self) -> usize {
        match *self {
            Repr::Int => mem::size_of::<i64>(),
            Repr::Aggregate { size, .. } => size,
        }
    }
}
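For orientation, here is a hypothetical caller of the moved API. This snippet is not part of the commit; it assumes the crate builds as above and sticks to the era's `try!` macro rather than `?`:

    use interpreter::EvalResult;
    use memory::{FieldRepr, Memory, Repr};

    fn demo() -> EvalResult<()> {
        let mut mem = Memory::new();

        // Two separate allocations, each sized for Repr::Int (8 bytes).
        let a = mem.allocate(Repr::Int);
        let b = mem.allocate(Repr::Int);

        // Round-trip an i64, then copy its bytes between the allocations.
        try!(mem.write_int(&a, 42));
        try!(mem.copy(&a, &b, Repr::Int.size()));
        assert_eq!(try!(mem.read_int(&b)), 42);

        // The layout Repr::from_ty builds for a tuple such as (i64, i64):
        // fields at offsets 0 and 8, total size 16. Field sizes are simply
        // summed; no alignment padding is inserted yet.
        let pair = Repr::Aggregate {
            size: 16,
            fields: vec![
                FieldRepr { offset: 0, repr: Repr::Int },
                FieldRepr { offset: 8, repr: Repr::Int },
            ],
        };
        assert_eq!(pair.size(), 16);

        Ok(())
    }

Note also the shape of `Memory::copy`: each `get_bytes_mut` result is immediately turned into a raw pointer with `.as_mut_ptr()`, so the function never holds two `&mut` borrows of `self` at once, and the `unsafe` block picks the overlap-safe `ptr::copy` when both pointers land in the same allocation, falling back to `ptr::copy_nonoverlapping` otherwise.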