Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
int
Rust
提交
bf7d4534
R
Rust
项目概览
int
/
Rust
11 个月 前同步成功
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
R
Rust
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
bf7d4534
编写于
12月 10, 2016
作者:
M
Mark-Simulacrum
提交者:
Mark Simulacrum
12月 20, 2016
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Refactor Block into BlockAndBuilder
上级
164619a8
变更
19
隐藏空白更改
内联
并排
Showing
19 changed file
with
1012 addition
and
1448 deletion
+1012
-1448
src/librustc_trans/adt.rs
src/librustc_trans/adt.rs
+19
-24
src/librustc_trans/asm.rs
src/librustc_trans/asm.rs
+1
-1
src/librustc_trans/base.rs
src/librustc_trans/base.rs
+81
-99
src/librustc_trans/build.rs
src/librustc_trans/build.rs
+473
-884
src/librustc_trans/callee.rs
src/librustc_trans/callee.rs
+28
-26
src/librustc_trans/cleanup.rs
src/librustc_trans/cleanup.rs
+38
-38
src/librustc_trans/common.rs
src/librustc_trans/common.rs
+25
-21
src/librustc_trans/debuginfo/mod.rs
src/librustc_trans/debuginfo/mod.rs
+5
-5
src/librustc_trans/glue.rs
src/librustc_trans/glue.rs
+84
-86
src/librustc_trans/intrinsic.rs
src/librustc_trans/intrinsic.rs
+79
-104
src/librustc_trans/meth.rs
src/librustc_trans/meth.rs
+3
-3
src/librustc_trans/mir/block.rs
src/librustc_trans/mir/block.rs
+80
-41
src/librustc_trans/mir/lvalue.rs
src/librustc_trans/mir/lvalue.rs
+1
-1
src/librustc_trans/mir/mod.rs
src/librustc_trans/mir/mod.rs
+10
-16
src/librustc_trans/mir/operand.rs
src/librustc_trans/mir/operand.rs
+3
-3
src/librustc_trans/mir/rvalue.rs
src/librustc_trans/mir/rvalue.rs
+54
-72
src/librustc_trans/mir/statement.rs
src/librustc_trans/mir/statement.rs
+4
-6
src/librustc_trans/tvec.rs
src/librustc_trans/tvec.rs
+21
-15
src/librustc_trans/value.rs
src/librustc_trans/value.rs
+3
-3
未找到文件。
src/librustc_trans/adt.rs
浏览文件 @
bf7d4534
...
...
@@ -304,7 +304,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
pub
fn
trans_switch
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
trans_switch
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
scrutinee
:
ValueRef
,
range_assert
:
bool
)
...
...
@@ -331,7 +331,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
}
/// Obtain the actual discriminant of a value.
pub
fn
trans_get_discr
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
pub
fn
trans_get_discr
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
scrutinee
:
ValueRef
,
cast_to
:
Option
<
Type
>
,
range_assert
:
bool
)
->
ValueRef
{
...
...
@@ -371,8 +371,12 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
}
}
fn
struct_wrapped_nullable_bitdiscr
(
bcx
:
Block
,
nndiscr
:
u64
,
discrfield
:
&
layout
::
FieldPath
,
scrutinee
:
ValueRef
)
->
ValueRef
{
fn
struct_wrapped_nullable_bitdiscr
(
bcx
:
&
BlockAndBuilder
,
nndiscr
:
u64
,
discrfield
:
&
layout
::
FieldPath
,
scrutinee
:
ValueRef
)
->
ValueRef
{
let
llptrptr
=
GEPi
(
bcx
,
scrutinee
,
&
discrfield
.iter
()
.map
(|
f
|
*
f
as
usize
)
.collect
::
<
Vec
<
_
>>
()[
..
]);
let
llptr
=
Load
(
bcx
,
llptrptr
);
...
...
@@ -381,7 +385,7 @@ fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layou
}
/// Helper for cases where the discriminant is simply loaded.
fn
load_discr
(
bcx
:
Block
,
ity
:
layout
::
Integer
,
ptr
:
ValueRef
,
min
:
u64
,
max
:
u64
,
fn
load_discr
(
bcx
:
&
BlockAndBuilder
,
ity
:
layout
::
Integer
,
ptr
:
ValueRef
,
min
:
u64
,
max
:
u64
,
range_assert
:
bool
)
->
ValueRef
{
let
llty
=
Type
::
from_integer
(
bcx
.ccx
(),
ity
);
...
...
@@ -409,7 +413,7 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6
/// discriminant-like value returned by `trans_switch`.
///
/// This should ideally be less tightly tied to `_match`.
pub
fn
trans_case
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
value
:
Disr
)
pub
fn
trans_case
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
value
:
Disr
)
->
ValueRef
{
let
l
=
bcx
.ccx
()
.layout_of
(
t
);
match
*
l
{
...
...
@@ -430,7 +434,7 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub
fn
trans_set_discr
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
pub
fn
trans_set_discr
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
val
:
ValueRef
,
to
:
Disr
)
{
let
l
=
bcx
.ccx
()
.layout_of
(
t
);
match
*
l
{
...
...
@@ -461,12 +465,11 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
// Issue #34427: As workaround for LLVM bug on
// ARM, use memset of 0 on whole struct rather
// than storing null to single target field.
let
b
=
B
(
bcx
);
let
llptr
=
b
.pointercast
(
val
,
Type
::
i8
(
b
.ccx
)
.ptr_to
());
let
fill_byte
=
C_u8
(
b
.ccx
,
0
);
let
size
=
C_uint
(
b
.ccx
,
nonnull
.stride
()
.bytes
());
let
align
=
C_i32
(
b
.ccx
,
nonnull
.align
.abi
()
as
i32
);
base
::
call_memset
(
&
b
,
llptr
,
fill_byte
,
size
,
align
,
false
);
let
llptr
=
bcx
.pointercast
(
val
,
Type
::
i8
(
bcx
.ccx
())
.ptr_to
());
let
fill_byte
=
C_u8
(
bcx
.ccx
(),
0
);
let
size
=
C_uint
(
bcx
.ccx
(),
nonnull
.stride
()
.bytes
());
let
align
=
C_i32
(
bcx
.ccx
(),
nonnull
.align
.abi
()
as
i32
);
base
::
call_memset
(
bcx
,
llptr
,
fill_byte
,
size
,
align
,
false
);
}
else
{
let
path
=
discrfield
.iter
()
.map
(|
&
i
|
i
as
usize
)
.collect
::
<
Vec
<
_
>>
();
let
llptrptr
=
GEPi
(
bcx
,
val
,
&
path
[
..
]);
...
...
@@ -479,7 +482,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
}
}
fn
target_sets_discr_via_memset
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
)
->
bool
{
fn
target_sets_discr_via_memset
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
)
->
bool
{
bcx
.sess
()
.target.target.arch
==
"arm"
||
bcx
.sess
()
.target.target.arch
==
"aarch64"
}
...
...
@@ -492,9 +495,9 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
}
/// Access a field, at a point when the value's case is known.
pub
fn
trans_field_ptr
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
pub
fn
trans_field_ptr
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
val
:
MaybeSizedValue
,
discr
:
Disr
,
ix
:
usize
)
->
ValueRef
{
trans_field_ptr_builder
(
&
bcx
.build
()
,
t
,
val
,
discr
,
ix
)
trans_field_ptr_builder
(
bcx
,
t
,
val
,
discr
,
ix
)
}
/// Access a field, at a point when the value's case is known.
...
...
@@ -530,7 +533,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
layout
::
UntaggedUnion
{
..
}
=>
{
let
fields
=
compute_fields
(
bcx
.ccx
(),
t
,
0
,
false
);
let
ty
=
type_of
::
in_memory_type_of
(
bcx
.ccx
(),
fields
[
ix
]);
if
bcx
.is_unreachable
()
{
return
C_undef
(
ty
.ptr_to
());
}
bcx
.pointercast
(
val
.value
,
ty
.ptr_to
())
}
layout
::
RawNullablePointer
{
nndiscr
,
..
}
|
...
...
@@ -540,9 +542,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// (e.d., Result of Either with (), as one side.)
let
ty
=
type_of
::
type_of
(
bcx
.ccx
(),
nullfields
[
ix
]);
assert_eq!
(
machine
::
llsize_of_alloc
(
bcx
.ccx
(),
ty
),
0
);
// The contents of memory at this pointer can't matter, but use
// the value that's "reasonable" in case of pointer comparison.
if
bcx
.is_unreachable
()
{
return
C_undef
(
ty
.ptr_to
());
}
bcx
.pointercast
(
val
.value
,
ty
.ptr_to
())
}
layout
::
RawNullablePointer
{
nndiscr
,
..
}
=>
{
...
...
@@ -550,7 +549,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
assert_eq!
(
ix
,
0
);
assert_eq!
(
discr
.0
,
nndiscr
);
let
ty
=
type_of
::
type_of
(
bcx
.ccx
(),
nnty
);
if
bcx
.is_unreachable
()
{
return
C_undef
(
ty
.ptr_to
());
}
bcx
.pointercast
(
val
.value
,
ty
.ptr_to
())
}
layout
::
StructWrappedNullablePointer
{
ref
nonnull
,
nndiscr
,
..
}
=>
{
...
...
@@ -569,9 +567,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let
fty
=
fields
[
ix
];
let
ccx
=
bcx
.ccx
();
let
ll_fty
=
type_of
::
in_memory_type_of
(
bcx
.ccx
(),
fty
);
if
bcx
.is_unreachable
()
{
return
C_undef
(
ll_fty
.ptr_to
());
}
let
ptr_val
=
if
needs_cast
{
let
fields
=
st
.field_index_by_increasing_offset
()
.map
(|
i
|
{
...
...
src/librustc_trans/asm.rs
浏览文件 @
bf7d4534
...
...
@@ -25,7 +25,7 @@
use
libc
::{
c_uint
,
c_char
};
// Take an inline assembly expression and splat it out via LLVM
pub
fn
trans_inline_asm
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
trans_inline_asm
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ia
:
&
hir
::
InlineAsm
,
outputs
:
Vec
<
(
ValueRef
,
Ty
<
'tcx
>
)
>
,
mut
inputs
:
Vec
<
ValueRef
>
)
{
...
...
src/librustc_trans/base.rs
浏览文件 @
bf7d4534
...
...
@@ -54,11 +54,10 @@
use
build
::
*
;
use
builder
::{
Builder
,
noname
};
use
callee
::{
Callee
};
use
common
::{
Block
,
C_bool
,
C_bytes_in_context
,
C_i32
,
C_uint
};
use
common
::{
Block
AndBuilder
,
C_bool
,
C_bytes_in_context
,
C_i32
,
C_uint
};
use
collector
::{
self
,
TransItemCollectionMode
};
use
common
::{
C_null
,
C_struct_in_context
,
C_u64
,
C_u8
,
C_undef
};
use
common
::{
CrateContext
,
FunctionContext
};
use
common
::{
Result
};
use
common
::{
fulfill_obligation
};
use
common
::{
type_is_zero_size
,
val_ty
};
use
common
;
...
...
@@ -174,11 +173,11 @@ fn drop(&mut self) {
}
}
pub
fn
get_meta
(
bcx
:
Block
,
fat_ptr
:
ValueRef
)
->
ValueRef
{
pub
fn
get_meta
(
bcx
:
&
BlockAndBuilder
,
fat_ptr
:
ValueRef
)
->
ValueRef
{
StructGEP
(
bcx
,
fat_ptr
,
abi
::
FAT_PTR_EXTRA
)
}
pub
fn
get_dataptr
(
bcx
:
Block
,
fat_ptr
:
ValueRef
)
->
ValueRef
{
pub
fn
get_dataptr
(
bcx
:
&
BlockAndBuilder
,
fat_ptr
:
ValueRef
)
->
ValueRef
{
StructGEP
(
bcx
,
fat_ptr
,
abi
::
FAT_PTR_ADDR
)
}
...
...
@@ -190,7 +189,9 @@ pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
b
.struct_gep
(
fat_ptr
,
abi
::
FAT_PTR_ADDR
)
}
fn
require_alloc_fn
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
info_ty
:
Ty
<
'tcx
>
,
it
:
LangItem
)
->
DefId
{
fn
require_alloc_fn
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
info_ty
:
Ty
<
'tcx
>
,
it
:
LangItem
)
->
DefId
{
match
bcx
.tcx
()
.lang_items
.require
(
it
)
{
Ok
(
id
)
=>
id
,
Err
(
s
)
=>
{
...
...
@@ -202,21 +203,19 @@ fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: L
// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.
pub
fn
malloc_raw_dyn
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
malloc_raw_dyn
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
llty_ptr
:
Type
,
info_ty
:
Ty
<
'tcx
>
,
size
:
ValueRef
,
align
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
Result
<
'blk
,
'tcx
>
{
->
ValueRef
{
let
_
icx
=
push_ctxt
(
"malloc_raw_exchange"
);
// Allocate space:
let
def_id
=
require_alloc_fn
(
bcx
,
info_ty
,
ExchangeMallocFnLangItem
);
let
r
=
Callee
::
def
(
bcx
.ccx
(),
def_id
,
bcx
.tcx
()
.intern_substs
(
&
[]))
.call
(
bcx
,
debug_loc
,
&
[
size
,
align
],
None
);
Result
::
new
(
r
.bcx
,
PointerCast
(
r
.bcx
,
r
.val
,
llty_ptr
))
let
r
=
Callee
::
def
(
bcx
.ccx
(),
def_id
,
bcx
.tcx
()
.intern_substs
(
&
[]))
.reify
(
bcx
.ccx
());
PointerCast
(
bcx
,
Call
(
bcx
,
r
,
&
[
size
,
align
],
debug_loc
),
llty_ptr
)
}
...
...
@@ -254,7 +253,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
}
}
pub
fn
compare_simd_types
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
compare_simd_types
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
...
...
@@ -311,7 +310,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
}
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub
fn
unsize_thin_ptr
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
unsize_thin_ptr
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
src
:
ValueRef
,
src_ty
:
Ty
<
'tcx
>
,
dst_ty
:
Ty
<
'tcx
>
)
...
...
@@ -336,7 +335,7 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub
fn
coerce_unsized_into
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
coerce_unsized_into
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
src
:
ValueRef
,
src_ty
:
Ty
<
'tcx
>
,
dst
:
ValueRef
,
...
...
@@ -415,7 +414,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx
}
}
pub
fn
cast_shift_expr_rhs
(
cx
:
Block
,
op
:
hir
::
BinOp_
,
lhs
:
ValueRef
,
rhs
:
ValueRef
)
->
ValueRef
{
pub
fn
cast_shift_expr_rhs
(
cx
:
&
BlockAndBuilder
,
op
:
hir
::
BinOp_
,
lhs
:
ValueRef
,
rhs
:
ValueRef
)
->
ValueRef
{
cast_shift_rhs
(
op
,
lhs
,
rhs
,
|
a
,
b
|
Trunc
(
cx
,
a
,
b
),
|
a
,
b
|
ZExt
(
cx
,
a
,
b
))
}
...
...
@@ -462,38 +461,38 @@ fn cast_shift_rhs<F, G>(op: hir::BinOp_,
}
}
pub
fn
invoke
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
invoke
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
llfn
:
ValueRef
,
llargs
:
&
[
ValueRef
],
debug_loc
:
DebugLoc
)
->
(
ValueRef
,
Block
<
'blk
,
'tcx
>
)
{
->
(
ValueRef
,
Block
AndBuilder
<
'blk
,
'tcx
>
)
{
let
_
icx
=
push_ctxt
(
"invoke_"
);
if
bcx
.
unreachable
.get
()
{
if
bcx
.
is_unreachable
()
{
return
(
C_null
(
Type
::
i8
(
bcx
.ccx
())),
bcx
);
}
if
need_invoke
(
bcx
)
{
debug!
(
"invoking {:?} at {:?}"
,
Value
(
llfn
),
bcx
.llbb
);
if
need_invoke
(
&
bcx
)
{
debug!
(
"invoking {:?} at {:?}"
,
Value
(
llfn
),
bcx
.llbb
()
);
for
&
llarg
in
llargs
{
debug!
(
"arg: {:?}"
,
Value
(
llarg
));
}
let
normal_bcx
=
bcx
.fcx
.new_block
(
"normal-return"
);
let
landing_pad
=
bcx
.fcx
.get_landing_pad
();
let
normal_bcx
=
bcx
.fcx
()
.new_block
(
"normal-return"
);
let
landing_pad
=
bcx
.fcx
()
.get_landing_pad
();
let
llresult
=
Invoke
(
bcx
,
let
llresult
=
Invoke
(
&
bcx
,
llfn
,
&
llargs
[
..
],
normal_bcx
.llbb
,
landing_pad
,
debug_loc
);
return
(
llresult
,
normal_bcx
);
return
(
llresult
,
normal_bcx
.build
()
);
}
else
{
debug!
(
"calling {:?} at {:?}"
,
Value
(
llfn
),
bcx
.llbb
);
debug!
(
"calling {:?} at {:?}"
,
Value
(
llfn
),
bcx
.llbb
()
);
for
&
llarg
in
llargs
{
debug!
(
"arg: {:?}"
,
Value
(
llarg
));
}
let
llresult
=
Call
(
bcx
,
llfn
,
&
llargs
[
..
],
debug_loc
);
let
llresult
=
Call
(
&
bcx
,
llfn
,
&
llargs
[
..
],
debug_loc
);
return
(
llresult
,
bcx
);
}
}
...
...
@@ -507,15 +506,11 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
sess
.target.target.options.is_like_msvc
}
pub
fn
avoid_invoke
(
bcx
:
Block
)
->
bool
{
bcx
.sess
()
.no_landing_pads
()
||
bcx
.lpad
()
.is_some
()
}
pub
fn
need_invoke
(
bcx
:
Block
)
->
bool
{
if
avoid_invoke
(
bcx
)
{
fn
need_invoke
(
bcx
:
&
BlockAndBuilder
)
->
bool
{
if
bcx
.sess
()
.no_landing_pads
()
||
bcx
.lpad
()
.is_some
()
{
false
}
else
{
bcx
.fcx
.needs_invoke
()
bcx
.fcx
()
.needs_invoke
()
}
}
...
...
@@ -527,11 +522,8 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
pub
fn
load_ty
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
ptr
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
C_undef
(
type_of
::
type_of
(
cx
.ccx
(),
t
));
}
load_ty_builder
(
&
B
(
cx
),
ptr
,
t
)
pub
fn
load_ty
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ptr
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
->
ValueRef
{
load_ty_builder
(
cx
,
ptr
,
t
)
}
pub
fn
load_ty_builder
<
'a
,
'tcx
>
(
b
:
&
Builder
<
'a
,
'tcx
>
,
ptr
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
->
ValueRef
{
...
...
@@ -569,8 +561,8 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub
fn
store_ty
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
dst
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
if
cx
.
unreachable
.get
()
{
pub
fn
store_ty
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
dst
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
if
cx
.
is_unreachable
()
{
return
;
}
...
...
@@ -585,7 +577,7 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t
}
}
pub
fn
store_fat_ptr
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
store_fat_ptr
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
data
:
ValueRef
,
extra
:
ValueRef
,
dst
:
ValueRef
,
...
...
@@ -595,18 +587,18 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
Store
(
cx
,
extra
,
get_meta
(
cx
,
dst
));
}
pub
fn
load_fat_ptr
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
load_fat_ptr
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
src
:
ValueRef
,
ty
:
Ty
<
'tcx
>
)
->
(
ValueRef
,
ValueRef
)
{
if
cx
.
unreachable
.get
()
{
if
cx
.
is_unreachable
()
{
// FIXME: remove me
return
(
Load
(
cx
,
get_dataptr
(
cx
,
src
)),
Load
(
cx
,
get_meta
(
cx
,
src
)));
}
load_fat_ptr_builder
(
&
B
(
cx
)
,
src
,
ty
)
load_fat_ptr_builder
(
cx
,
src
,
ty
)
}
pub
fn
load_fat_ptr_builder
<
'a
,
'tcx
>
(
...
...
@@ -629,7 +621,7 @@ pub fn load_fat_ptr_builder<'a, 'tcx>(
(
ptr
,
meta
)
}
pub
fn
from_immediate
(
bcx
:
Block
,
val
:
ValueRef
)
->
ValueRef
{
pub
fn
from_immediate
(
bcx
:
&
BlockAndBuilder
,
val
:
ValueRef
)
->
ValueRef
{
if
val_ty
(
val
)
==
Type
::
i1
(
bcx
.ccx
())
{
ZExt
(
bcx
,
val
,
Type
::
i8
(
bcx
.ccx
()))
}
else
{
...
...
@@ -637,7 +629,7 @@ pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
}
}
pub
fn
to_immediate
(
bcx
:
Block
,
val
:
ValueRef
,
ty
:
Ty
)
->
ValueRef
{
pub
fn
to_immediate
(
bcx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
ty
:
Ty
)
->
ValueRef
{
if
ty
.is_bool
()
{
Trunc
(
bcx
,
val
,
Type
::
i1
(
bcx
.ccx
()))
}
else
{
...
...
@@ -645,23 +637,23 @@ pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
}
}
pub
fn
with_cond
<
'blk
,
'tcx
,
F
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
val
:
ValueRef
,
f
:
F
)
->
Block
<
'blk
,
'tcx
>
where
F
:
FnOnce
(
Block
<
'blk
,
'tcx
>
)
->
Block
<
'blk
,
'tcx
>
pub
fn
with_cond
<
'blk
,
'tcx
,
F
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
val
:
ValueRef
,
f
:
F
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
where
F
:
FnOnce
(
BlockAndBuilder
<
'blk
,
'tcx
>
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
let
_
icx
=
push_ctxt
(
"with_cond"
);
if
bcx
.
unreachable
.get
()
||
common
::
const_to_opt_uint
(
val
)
==
Some
(
0
)
{
if
bcx
.
is_unreachable
()
||
common
::
const_to_opt_uint
(
val
)
==
Some
(
0
)
{
return
bcx
;
}
let
fcx
=
bcx
.fcx
;
let
next_cx
=
fcx
.new_block
(
"next"
);
let
cond_cx
=
fcx
.new_block
(
"cond"
);
CondBr
(
bcx
,
val
,
cond_cx
.llbb
,
next_cx
.llbb
,
DebugLoc
::
None
);
let
fcx
=
bcx
.fcx
()
;
let
next_cx
=
fcx
.new_block
(
"next"
)
.build
()
;
let
cond_cx
=
fcx
.new_block
(
"cond"
)
.build
()
;
CondBr
(
&
bcx
,
val
,
cond_cx
.llbb
(),
next_cx
.llbb
()
,
DebugLoc
::
None
);
let
after_cx
=
f
(
cond_cx
);
if
!
after_cx
.terminated
.get
()
{
Br
(
after_cx
,
next_cx
.llbb
,
DebugLoc
::
None
);
}
Br
(
&
after_cx
,
next_cx
.llbb
(),
DebugLoc
::
None
);
next_cx
}
...
...
@@ -711,26 +703,25 @@ pub fn call(self, b: &Builder, ptr: ValueRef) {
}
}
pub
fn
call_lifetime_start
(
bcx
:
Block
,
ptr
:
ValueRef
)
{
if
!
bcx
.
unreachable
.get
()
{
Lifetime
::
Start
.call
(
&
bcx
.build
()
,
ptr
);
pub
fn
call_lifetime_start
(
bcx
:
&
BlockAndBuilder
,
ptr
:
ValueRef
)
{
if
!
bcx
.
is_unreachable
()
{
Lifetime
::
Start
.call
(
bcx
,
ptr
);
}
}
pub
fn
call_lifetime_end
(
bcx
:
Block
,
ptr
:
ValueRef
)
{
if
!
bcx
.
unreachable
.get
()
{
Lifetime
::
End
.call
(
&
bcx
.build
()
,
ptr
);
pub
fn
call_lifetime_end
(
bcx
:
&
BlockAndBuilder
,
ptr
:
ValueRef
)
{
if
!
bcx
.
is_unreachable
()
{
Lifetime
::
End
.call
(
bcx
,
ptr
);
}
}
// Generates code for resumption of unwind at the end of a landing pad.
pub
fn
trans_unwind_resume
(
bcx
:
Block
,
lpval
:
ValueRef
)
{
pub
fn
trans_unwind_resume
(
bcx
:
&
BlockAndBuilder
,
lpval
:
ValueRef
)
{
if
!
bcx
.sess
()
.target.target.options.custom_unwind_resume
{
Resume
(
bcx
,
lpval
);
bcx
.resume
(
lpval
);
}
else
{
let
exc_ptr
=
ExtractValue
(
bcx
,
lpval
,
0
);
bcx
.fcx
.eh_unwind_resume
()
.call
(
bcx
,
DebugLoc
::
None
,
&
[
exc_ptr
],
None
);
Call
(
bcx
,
bcx
.fcx
()
.eh_unwind_resume
()
.reify
(
bcx
.ccx
()),
&
[
exc_ptr
],
DebugLoc
::
None
);
}
}
...
...
@@ -752,11 +743,11 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
b
.call
(
memcpy
,
&
[
dst_ptr
,
src_ptr
,
size
,
align
,
volatile
],
None
);
}
pub
fn
memcpy_ty
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
dst
:
ValueRef
,
src
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
pub
fn
memcpy_ty
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
dst
:
ValueRef
,
src
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
let
_
icx
=
push_ctxt
(
"memcpy_ty"
);
let
ccx
=
bcx
.ccx
();
if
type_is_zero_size
(
ccx
,
t
)
||
bcx
.
unreachable
.get
()
{
if
type_is_zero_size
(
ccx
,
t
)
||
bcx
.
is_unreachable
()
{
return
;
}
...
...
@@ -764,7 +755,7 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe
let
llty
=
type_of
::
type_of
(
ccx
,
t
);
let
llsz
=
llsize_of
(
ccx
,
llty
);
let
llalign
=
type_of
::
align_of
(
ccx
,
t
);
call_memcpy
(
&
B
(
bcx
)
,
dst
,
src
,
llsz
,
llalign
as
u32
);
call_memcpy
(
bcx
,
dst
,
src
,
llsz
,
llalign
as
u32
);
}
else
if
common
::
type_is_fat_ptr
(
bcx
.tcx
(),
t
)
{
let
(
data
,
extra
)
=
load_fat_ptr
(
bcx
,
src
,
t
);
store_fat_ptr
(
bcx
,
data
,
extra
,
dst
,
t
);
...
...
@@ -773,13 +764,13 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe
}
}
pub
fn
init_zero_mem
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
llptr
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
if
cx
.
unreachable
.get
()
{
pub
fn
init_zero_mem
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
llptr
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
if
cx
.
is_unreachable
()
{
return
;
}
let
_
icx
=
push_ctxt
(
"init_zero_mem"
);
let
bcx
=
cx
;
memfill
(
&
B
(
bcx
)
,
llptr
,
t
,
0
);
memfill
(
bcx
,
llptr
,
t
,
0
);
}
// Always use this function instead of storing a constant byte to the memory
...
...
@@ -812,24 +803,17 @@ pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
b
.call
(
llintrinsicfn
,
&
[
ptr
,
fill_byte
,
size
,
align
,
volatile
],
None
);
}
pub
fn
alloc_ty
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
alloc_ty
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ty
:
Ty
<
'tcx
>
,
name
:
&
str
)
->
ValueRef
{
assert
!
(
!
ty
.has_param_types
());
alloca
(
bcx
,
type_of
::
type_of
(
bcx
.ccx
(),
ty
),
name
)
}
pub
fn
alloca
(
cx
:
Block
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
pub
fn
alloca
(
cx
:
&
BlockAndBuilder
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
let
_
icx
=
push_ctxt
(
"alloca"
);
if
cx
.unreachable
.get
()
{
unsafe
{
return
llvm
::
LLVMGetUndef
(
ty
.ptr_to
()
.to_ref
());
}
}
DebugLoc
::
None
.apply
(
cx
.fcx
);
let
result
=
Alloca
(
cx
,
ty
,
name
);
debug!
(
"alloca({:?}) = {:?}"
,
name
,
result
);
result
DebugLoc
::
None
.apply
(
cx
.fcx
());
Alloca
(
cx
,
ty
,
name
)
}
impl
<
'blk
,
'tcx
>
FunctionContext
<
'blk
,
'tcx
>
{
...
...
@@ -894,14 +878,14 @@ pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
/// Performs setup on a newly created function, creating the entry
/// scope block and allocating space for the return pointer.
pub
fn
init
(
&
'blk
self
,
skip_retptr
:
bool
)
->
Block
<
'blk
,
'tcx
>
{
let
entry_bcx
=
self
.new_block
(
"entry-block"
);
pub
fn
init
(
&
'blk
self
,
skip_retptr
:
bool
)
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
entry_bcx
=
self
.new_block
(
"entry-block"
)
.build
()
;
// Use a dummy instruction as the insertion point for all allocas.
// This is later removed in FunctionContext::cleanup.
self
.alloca_insert_pt
.set
(
Some
(
unsafe
{
Load
(
entry_bcx
,
C_null
(
Type
::
i8p
(
self
.ccx
)));
llvm
::
LLVMGetFirstInstruction
(
entry_bcx
.llbb
)
Load
(
&
entry_bcx
,
C_null
(
Type
::
i8p
(
self
.ccx
)));
llvm
::
LLVMGetFirstInstruction
(
entry_bcx
.llbb
()
)
}));
if
!
self
.fn_ty.ret
.is_ignore
()
&&
!
skip_retptr
{
...
...
@@ -929,7 +913,7 @@ pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> {
/// Ties up the llstaticallocas -> llloadenv -> lltop edges,
/// and builds the return block.
pub
fn
finish
(
&
'blk
self
,
ret_cx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
finish
(
&
'blk
self
,
ret_cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ret_debug_loc
:
DebugLoc
)
{
let
_
icx
=
push_ctxt
(
"FunctionContext::finish"
);
...
...
@@ -940,10 +924,9 @@ pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>,
}
// Builds the return block for a function.
pub
fn
build_return_block
(
&
self
,
ret_cx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
build_return_block
(
&
self
,
ret_cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ret_debug_location
:
DebugLoc
)
{
if
self
.llretslotptr
.get
()
.is_none
()
||
ret_cx
.unreachable
.get
()
||
if
self
.llretslotptr
.get
()
.is_none
()
||
ret_cx
.is_unreachable
()
||
self
.fn_ty.ret
.is_indirect
()
{
return
RetVoid
(
ret_cx
,
ret_debug_location
);
}
...
...
@@ -978,7 +961,7 @@ pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>,
assert_eq!
(
cast_ty
,
None
);
let
llsz
=
llsize_of
(
self
.ccx
,
self
.fn_ty.ret.ty
);
let
llalign
=
llalign_of_min
(
self
.ccx
,
self
.fn_ty.ret.ty
);
call_memcpy
(
&
B
(
ret_cx
)
,
get_param
(
self
.llfn
,
0
),
call_memcpy
(
&
ret_cx
,
get_param
(
self
.llfn
,
0
),
retslot
,
llsz
,
llalign
as
u32
);
RetVoid
(
ret_cx
,
ret_debug_location
)
}
...
...
@@ -1080,23 +1063,22 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let
mut
llarg_idx
=
fcx
.fn_ty.ret
.is_indirect
()
as
usize
;
let
mut
arg_idx
=
0
;
for
(
i
,
arg_ty
)
in
sig
.inputs
()
.iter
()
.enumerate
()
{
let
lldestptr
=
adt
::
trans_field_ptr
(
bcx
,
sig
.output
(),
dest_val
,
Disr
::
from
(
disr
),
i
);
let
lldestptr
=
adt
::
trans_field_ptr
(
&
bcx
,
sig
.output
(),
dest_val
,
Disr
::
from
(
disr
),
i
);
let
arg
=
&
fcx
.fn_ty.args
[
arg_idx
];
arg_idx
+=
1
;
let
b
=
&
bcx
.build
();
if
common
::
type_is_fat_ptr
(
bcx
.tcx
(),
arg_ty
)
{
let
meta
=
&
fcx
.fn_ty.args
[
arg_idx
];
arg_idx
+=
1
;
arg
.store_fn_arg
(
b
,
&
mut
llarg_idx
,
get_dataptr
(
bcx
,
lldestptr
));
meta
.store_fn_arg
(
b
,
&
mut
llarg_idx
,
get_meta
(
bcx
,
lldestptr
));
arg
.store_fn_arg
(
&
bcx
,
&
mut
llarg_idx
,
get_dataptr
(
&
bcx
,
lldestptr
));
meta
.store_fn_arg
(
&
bcx
,
&
mut
llarg_idx
,
get_meta
(
&
bcx
,
lldestptr
));
}
else
{
arg
.store_fn_arg
(
b
,
&
mut
llarg_idx
,
lldestptr
);
arg
.store_fn_arg
(
&
bcx
,
&
mut
llarg_idx
,
lldestptr
);
}
}
adt
::
trans_set_discr
(
bcx
,
sig
.output
(),
dest
,
disr
);
adt
::
trans_set_discr
(
&
bcx
,
sig
.output
(),
dest
,
disr
);
}
fcx
.finish
(
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
}
pub
fn
llvm_linkage_by_name
(
name
:
&
str
)
->
Option
<
Linkage
>
{
...
...
src/librustc_trans/build.rs
浏览文件 @
bf7d4534
...
...
@@ -18,30 +18,12 @@
use
common
::
*
;
use
syntax_pos
::
Span
;
use
builder
::
Builder
;
use
type_
::
Type
;
use
value
::
Value
;
use
debuginfo
::
DebugLoc
;
use
libc
::{
c_uint
,
c_char
};
pub
fn
terminate
(
cx
:
Block
,
_
:
&
str
)
{
debug!
(
"terminate({})"
,
cx
.to_str
());
cx
.terminated
.set
(
true
);
}
pub
fn
check_not_terminated
(
cx
:
Block
)
{
if
cx
.terminated
.get
()
{
bug!
(
"already terminated!"
);
}
}
pub
fn
B
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
)
->
Builder
<
'blk
,
'tcx
>
{
let
b
=
cx
.fcx.ccx
.builder
();
b
.position_at_end
(
cx
.llbb
);
b
}
// The difference between a block being unreachable and being terminated is
// somewhat obscure, and has to do with error checking. When a block is
// terminated, we're saying that trying to add any further statements in the
...
...
@@ -50,70 +32,48 @@ pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> {
// for (panic/break/return statements, call to diverging functions, etc), and
// further instructions to the block should simply be ignored.
pub
fn
RetVoid
(
cx
:
Block
,
debug_loc
:
DebugLoc
)
{
if
cx
.unreachable
.get
()
{
return
;
}
check_not_terminated
(
cx
);
terminate
(
cx
,
"RetVoid"
);
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.ret_void
();
pub
fn
RetVoid
(
cx
:
&
BlockAndBuilder
,
debug_loc
:
DebugLoc
)
{
cx
.terminate
();
debug_loc
.apply
(
cx
.fcx
());
cx
.ret_void
();
}
pub
fn
Ret
(
cx
:
Block
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
{
if
cx
.unreachable
.get
()
{
return
;
}
check_not_terminated
(
cx
);
terminate
(
cx
,
"Ret"
);
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.ret
(
v
);
pub
fn
Ret
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
{
cx
.terminate
();
debug_loc
.apply
(
cx
.fcx
());
cx
.ret
(
v
);
}
pub
fn
AggregateRet
(
cx
:
Block
,
ret_vals
:
&
[
ValueRef
],
debug_loc
:
DebugLoc
)
{
if
cx
.unreachable
.get
()
{
return
;
}
check_not_terminated
(
cx
);
terminate
(
cx
,
"AggregateRet"
);
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.aggregate_ret
(
ret_vals
);
pub
fn
AggregateRet
(
cx
:
&
BlockAndBuilder
,
ret_vals
:
&
[
ValueRef
],
debug_loc
:
DebugLoc
)
{
cx
.terminate
();
debug_loc
.apply
(
cx
.fcx
());
cx
.aggregate_ret
(
ret_vals
);
}
pub
fn
Br
(
cx
:
Block
,
dest
:
BasicBlockRef
,
debug_loc
:
DebugLoc
)
{
if
cx
.unreachable
.get
()
{
return
;
}
check_not_terminated
(
cx
);
terminate
(
cx
,
"Br"
);
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.br
(
dest
);
}
pub
fn
CondBr
(
cx
:
Block
,
if_
:
ValueRef
,
then
:
BasicBlockRef
,
else_
:
BasicBlockRef
,
debug_loc
:
DebugLoc
)
{
if
cx
.unreachable
.get
()
{
return
;
}
check_not_terminated
(
cx
);
terminate
(
cx
,
"CondBr"
);
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.cond_br
(
if_
,
then
,
else_
);
pub
fn
Br
(
cx
:
&
BlockAndBuilder
,
dest
:
BasicBlockRef
,
debug_loc
:
DebugLoc
)
{
cx
.terminate
();
debug_loc
.apply
(
cx
.fcx
());
cx
.br
(
dest
);
}
pub
fn
Switch
(
cx
:
Block
,
v
:
ValueRef
,
else_
:
BasicBlockRef
,
num_cases
:
usize
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
v
);
}
check_not_terminated
(
cx
);
terminate
(
cx
,
"Switch"
);
B
(
cx
)
.switch
(
v
,
else_
,
num_cases
)
pub
fn
CondBr
(
cx
:
&
BlockAndBuilder
,
if_
:
ValueRef
,
then
:
BasicBlockRef
,
else_
:
BasicBlockRef
,
debug_loc
:
DebugLoc
)
{
cx
.terminate
();
debug_loc
.apply
(
cx
.fcx
());
cx
.cond_br
(
if_
,
then
,
else_
);
}
pub
fn
Switch
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
else_
:
BasicBlockRef
,
num_cases
:
usize
)
->
ValueRef
{
cx
.terminate
();
cx
.switch
(
v
,
else_
,
num_cases
)
}
pub
fn
AddCase
(
s
:
ValueRef
,
on_val
:
ValueRef
,
dest
:
BasicBlockRef
)
{
unsafe
{
if
llvm
::
LLVMIsUndef
(
s
)
==
llvm
::
True
{
return
;
}
...
...
@@ -121,475 +81,340 @@ pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
}
}
/// Emits an indirect branch through the address `addr`, with capacity for
/// `num_dests` possible destinations. No-op when the block is already
/// unreachable; otherwise checks and records termination before building.
pub fn IndirectBr(cx: Block,
                  addr: ValueRef,
                  num_dests: usize,
                  debug_loc: DebugLoc) {
    if cx.unreachable.get() {
        return;
    }
    check_not_terminated(cx);
    terminate(cx, "IndirectBr");
    debug_loc.apply(cx.fcx);
    B(cx).indirect_br(addr, num_dests);
}
/// Emits an `invoke` of `fn_` with `args`: control resumes at `then` on
/// normal return and unwinds to `catch`.
///
/// When the block is already unreachable, returns a null `i8` constant as
/// a placeholder instead of emitting anything. Otherwise records
/// termination, logs the call, applies `debug_loc`, and threads through
/// any operand bundle from the current landing pad.
pub fn Invoke(cx: Block,
              fn_: ValueRef,
              args: &[ValueRef],
              then: BasicBlockRef,
              catch: BasicBlockRef,
              debug_loc: DebugLoc)
              -> ValueRef {
    if cx.unreachable.get() {
        return C_null(Type::i8(cx.ccx()));
    }
    check_not_terminated(cx);
    terminate(cx, "Invoke");
    debug!("Invoke({:?} with arguments ({}))",
           Value(fn_),
           args.iter().map(|a| {
               format!("{:?}", Value(*a))
           }).collect::<Vec<String>>().join(", "));
    debug_loc.apply(cx.fcx);
    // Funclet/landing-pad operand bundle, when one is active.
    let bundle = cx.lpad().and_then(|b| b.bundle());
    B(cx).invoke(fn_, args, then, catch, bundle)
}
pub
fn
Unreachable
(
cx
:
Block
)
{
if
cx
.unreachable
.get
()
{
return
}
cx
.unreachable
.set
(
true
);
if
!
cx
.terminated
.get
()
{
B
(
cx
)
.unreachable
();
}
/// Terminates the current block with an indirect branch through `addr`,
/// sized for `num_dests` destinations; `debug_loc` is applied first.
pub fn IndirectBr(cx: &BlockAndBuilder,
                  addr: ValueRef,
                  num_dests: usize,
                  debug_loc: DebugLoc) {
    cx.terminate();
    debug_loc.apply(cx.fcx());
    cx.indirect_br(addr, num_dests);
}
pub
fn
_
Undef
(
val
:
ValueRef
)
->
ValueRef
{
unsafe
{
return
llvm
::
LLVMGetUndef
(
val_ty
(
val
)
.to_ref
());
}
/// Terminates the current block with an `invoke` of `fn_` with `args`:
/// control resumes at `then` on normal return and unwinds to `catch`.
/// Logs the callee and arguments, applies `debug_loc`, and threads
/// through any operand bundle from the current landing pad.
pub fn Invoke(cx: &BlockAndBuilder,
              fn_: ValueRef,
              args: &[ValueRef],
              then: BasicBlockRef,
              catch: BasicBlockRef,
              debug_loc: DebugLoc)
              -> ValueRef {
    cx.terminate();
    debug!("Invoke({:?} with arguments ({}))",
           Value(fn_),
           args.iter().map(|a| {
               format!("{:?}", Value(*a))
           }).collect::<Vec<String>>().join(", "));
    debug_loc.apply(cx.fcx());
    // Funclet/landing-pad operand bundle, when one is active.
    let bundle = cx.lpad().and_then(|b| b.bundle());
    cx.invoke(fn_, args, then, catch, bundle)
}
/* Arithmetic */
pub
fn
Add
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds an integer `add` of `lhs` and `rhs`, applying `debug_loc` to
/// the function context first.
pub fn Add(cx: &BlockAndBuilder,
           lhs: ValueRef,
           rhs: ValueRef,
           debug_loc: DebugLoc)
           -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.add(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.add
(
lhs
,
rhs
)
}
pub
fn
NSWAdd
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
NSWAdd
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswadd
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nswadd
(
lhs
,
rhs
)
}
pub
fn
NUWAdd
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
NUWAdd
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwadd
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nuwadd
(
lhs
,
rhs
)
}
pub
fn
FAdd
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FAdd
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fadd
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fadd
(
lhs
,
rhs
)
}
pub
fn
FAddFast
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FAddFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fadd_fast
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fadd_fast
(
lhs
,
rhs
)
}
pub
fn
Sub
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds an integer `sub` of `lhs` and `rhs`, applying `debug_loc` to
/// the function context first.
pub fn Sub(cx: &BlockAndBuilder,
           lhs: ValueRef,
           rhs: ValueRef,
           debug_loc: DebugLoc)
           -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.sub(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.sub
(
lhs
,
rhs
)
}
pub
fn
NSWSub
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
NSWSub
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswsub
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nswsub
(
lhs
,
rhs
)
}
pub
fn
NUWSub
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
NUWSub
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwsub
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nuwsub
(
lhs
,
rhs
)
}
pub
fn
FSub
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FSub
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fsub
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fsub
(
lhs
,
rhs
)
}
pub
fn
FSubFast
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FSubFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fsub_fast
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fsub_fast
(
lhs
,
rhs
)
}
pub
fn
Mul
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds an integer `mul` of `lhs` and `rhs`, applying `debug_loc` to
/// the function context first.
pub fn Mul(cx: &BlockAndBuilder,
           lhs: ValueRef,
           rhs: ValueRef,
           debug_loc: DebugLoc)
           -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.mul(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.mul
(
lhs
,
rhs
)
}
pub
fn
NSWMul
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
NSWMul
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswmul
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nswmul
(
lhs
,
rhs
)
}
pub
fn
NUWMul
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
NUWMul
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwmul
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nuwmul
(
lhs
,
rhs
)
}
pub
fn
FMul
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FMul
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fmul
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fmul
(
lhs
,
rhs
)
}
pub
fn
FMulFast
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FMulFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fmul_fast
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fmul_fast
(
lhs
,
rhs
)
}
pub
fn
UDiv
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds a `udiv` of `lhs` by `rhs`, applying `debug_loc` to the
/// function context first.
pub fn UDiv(cx: &BlockAndBuilder,
            lhs: ValueRef,
            rhs: ValueRef,
            debug_loc: DebugLoc)
            -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.udiv(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.udiv
(
lhs
,
rhs
)
}
pub
fn
SDiv
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
SDiv
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.sdiv
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.sdiv
(
lhs
,
rhs
)
}
pub
fn
ExactSDiv
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
ExactSDiv
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.exactsdiv
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.exactsdiv
(
lhs
,
rhs
)
}
pub
fn
FDiv
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FDiv
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fdiv
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fdiv
(
lhs
,
rhs
)
}
pub
fn
FDivFast
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FDivFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fdiv_fast
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fdiv_fast
(
lhs
,
rhs
)
}
pub
fn
URem
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds a `urem` of `lhs` by `rhs`, applying `debug_loc` to the
/// function context first.
pub fn URem(cx: &BlockAndBuilder,
            lhs: ValueRef,
            rhs: ValueRef,
            debug_loc: DebugLoc)
            -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.urem(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.urem
(
lhs
,
rhs
)
}
pub
fn
SRem
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
SRem
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.srem
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.srem
(
lhs
,
rhs
)
}
pub
fn
FRem
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FRem
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.frem
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.frem
(
lhs
,
rhs
)
}
pub
fn
FRemFast
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
FRemFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.frem_fast
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.frem_fast
(
lhs
,
rhs
)
}
pub
fn
Shl
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds a `shl` (left shift) of `lhs` by `rhs`, applying `debug_loc`
/// to the function context first.
pub fn Shl(cx: &BlockAndBuilder,
           lhs: ValueRef,
           rhs: ValueRef,
           debug_loc: DebugLoc)
           -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.shl(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.shl
(
lhs
,
rhs
)
}
pub
fn
LShr
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
LShr
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.lshr
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.lshr
(
lhs
,
rhs
)
}
pub
fn
AShr
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
AShr
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.ashr
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.ashr
(
lhs
,
rhs
)
}
pub
fn
And
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds a bitwise `and` of `lhs` and `rhs`, applying `debug_loc` to
/// the function context first.
pub fn And(cx: &BlockAndBuilder,
           lhs: ValueRef,
           rhs: ValueRef,
           debug_loc: DebugLoc)
           -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.and(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.and
(
lhs
,
rhs
)
}
pub
fn
Or
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
pub
fn
Or
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.or
(
lhs
,
rhs
)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.or
(
lhs
,
rhs
)
}
/// Builds a bitwise `xor` of `lhs` and `rhs`. Returns an undef value of
/// `lhs`'s type when the block is already unreachable, emitting nothing.
pub fn Xor(cx: Block,
           lhs: ValueRef,
           rhs: ValueRef,
           debug_loc: DebugLoc)
           -> ValueRef {
    if cx.unreachable.get() {
        return _Undef(lhs);
    }
    debug_loc.apply(cx.fcx);
    B(cx).xor(lhs, rhs)
}
pub
fn
BinOp
(
cx
:
Block
,
op
:
Opcode
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
lhs
);
/// Builds a bitwise `xor` of `lhs` and `rhs`, applying `debug_loc` to
/// the function context first.
pub fn Xor(cx: &BlockAndBuilder,
           lhs: ValueRef,
           rhs: ValueRef,
           debug_loc: DebugLoc)
           -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.xor(lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.binop
(
op
,
lhs
,
rhs
)
}
pub
fn
Neg
(
cx
:
Block
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
v
);
/// Builds a binary operation chosen by the opcode `op` on `lhs` and
/// `rhs`, applying `debug_loc` to the function context first.
pub fn BinOp(cx: &BlockAndBuilder,
             op: Opcode,
             lhs: ValueRef,
             rhs: ValueRef,
             debug_loc: DebugLoc)
             -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.binop(op, lhs, rhs)
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.neg
(
v
)
/// Builds an integer negation of `v`, applying `debug_loc` to the
/// function context first.
pub fn Neg(cx: &BlockAndBuilder, v: ValueRef, debug_loc: DebugLoc) -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.neg(v)
}
pub
fn
NSWNeg
(
cx
:
Block
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
v
);
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nswneg
(
v
)
pub
fn
NSWNeg
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswneg
(
v
)
}
pub
fn
NUWNeg
(
cx
:
Block
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
v
);
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.nuwneg
(
v
)
pub
fn
NUWNeg
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwneg
(
v
)
}
pub
fn
FNeg
(
cx
:
Block
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
v
);
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fneg
(
v
)
pub
fn
FNeg
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fneg
(
v
)
}
pub
fn
Not
(
cx
:
Block
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
v
);
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.not
(
v
)
pub
fn
Not
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.not
(
v
)
}
pub
fn
Alloca
(
cx
:
Block
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
ty
.ptr_to
()
.to_ref
());
}
AllocaFcx
(
cx
.fcx
,
ty
,
name
)
}
/// Builds a stack allocation of type `ty` named `name`, delegating to
/// `AllocaFcx` with this block's function context.
pub fn Alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef {
    AllocaFcx(cx.fcx(), ty, name)
}
pub
fn
AllocaFcx
(
fcx
:
&
FunctionContext
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
...
...
@@ -599,336 +424,179 @@ pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef {
b
.alloca
(
ty
,
name
)
}
pub
fn
Free
(
cx
:
Block
,
pointer_val
:
ValueRef
)
{
if
cx
.unreachable
.get
()
{
return
;
}
B
(
cx
)
.free
(
pointer_val
)
/// Builds a `free` of `pointer_val` via the block's builder.
pub fn Free(cx: &BlockAndBuilder, pointer_val: ValueRef) {
    cx.free(pointer_val)
}
pub
fn
Load
(
cx
:
Block
,
pointer_val
:
ValueRef
)
->
ValueRef
{
unsafe
{
let
ccx
=
cx
.fcx.ccx
;
if
cx
.unreachable
.get
()
{
let
ty
=
val_ty
(
pointer_val
);
let
eltty
=
if
ty
.kind
()
==
llvm
::
Array
{
ty
.element_type
()
}
else
{
ccx
.int_type
()
};
return
llvm
::
LLVMGetUndef
(
eltty
.to_ref
());
}
B
(
cx
)
.load
(
pointer_val
)
}
/// Builds a `load` from `pointer_val` via the block's builder.
pub fn Load(cx: &BlockAndBuilder, pointer_val: ValueRef) -> ValueRef {
    cx.load(pointer_val)
}
pub
fn
VolatileLoad
(
cx
:
Block
,
pointer_val
:
ValueRef
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.volatile_load
(
pointer_val
)
}
pub
fn
VolatileLoad
(
cx
:
&
BlockAndBuilder
,
pointer_val
:
ValueRef
)
->
ValueRef
{
cx
.volatile_load
(
pointer_val
)
}
pub
fn
AtomicLoad
(
cx
:
Block
,
pointer_val
:
ValueRef
,
order
:
AtomicOrdering
)
->
ValueRef
{
unsafe
{
let
ccx
=
cx
.fcx.ccx
;
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
ccx
.int_type
()
.to_ref
());
}
B
(
cx
)
.atomic_load
(
pointer_val
,
order
)
}
/// Builds an atomic `load` from `pointer_val` with the given memory
/// ordering, via the block's builder.
pub fn AtomicLoad(cx: &BlockAndBuilder,
                  pointer_val: ValueRef,
                  order: AtomicOrdering)
                  -> ValueRef {
    cx.atomic_load(pointer_val, order)
}
pub
fn
LoadRangeAssert
(
cx
:
Block
,
pointer_val
:
ValueRef
,
lo
:
u64
,
hi
:
u64
,
signed
:
llvm
::
Bool
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
let
ccx
=
cx
.fcx.ccx
;
let
ty
=
val_ty
(
pointer_val
);
let
eltty
=
if
ty
.kind
()
==
llvm
::
Array
{
ty
.element_type
()
}
else
{
ccx
.int_type
()
};
unsafe
{
llvm
::
LLVMGetUndef
(
eltty
.to_ref
())
}
}
else
{
B
(
cx
)
.load_range_assert
(
pointer_val
,
lo
,
hi
,
signed
)
}
/// Builds a `load` from `pointer_val` annotated with a `[lo, hi)` range
/// assertion (`signed` selects signed vs. unsigned interpretation),
/// via the block's builder.
pub fn LoadRangeAssert(cx: &BlockAndBuilder,
                       pointer_val: ValueRef,
                       lo: u64,
                       hi: u64,
                       signed: llvm::Bool)
                       -> ValueRef {
    cx.load_range_assert(pointer_val, lo, hi, signed)
}
pub
fn
LoadNonNull
(
cx
:
Block
,
ptr
:
ValueRef
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
let
ccx
=
cx
.fcx.ccx
;
let
ty
=
val_ty
(
ptr
);
let
eltty
=
if
ty
.kind
()
==
llvm
::
Array
{
ty
.element_type
()
}
else
{
ccx
.int_type
()
};
unsafe
{
llvm
::
LLVMGetUndef
(
eltty
.to_ref
())
}
}
else
{
B
(
cx
)
.load_nonnull
(
ptr
)
}
pub
fn
LoadNonNull
(
cx
:
&
BlockAndBuilder
,
ptr
:
ValueRef
)
->
ValueRef
{
cx
.load_nonnull
(
ptr
)
}
pub
fn
Store
(
cx
:
Block
,
val
:
ValueRef
,
ptr
:
ValueRef
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
C_nil
(
cx
.ccx
());
}
B
(
cx
)
.store
(
val
,
ptr
)
/// Builds a `store` of `val` into `ptr` via the block's builder,
/// returning the store instruction value.
pub fn Store(cx: &BlockAndBuilder, val: ValueRef, ptr: ValueRef) -> ValueRef {
    cx.store(val, ptr)
}
pub
fn
VolatileStore
(
cx
:
Block
,
val
:
ValueRef
,
ptr
:
ValueRef
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
C_nil
(
cx
.ccx
());
}
B
(
cx
)
.volatile_store
(
val
,
ptr
)
pub
fn
VolatileStore
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
ptr
:
ValueRef
)
->
ValueRef
{
cx
.volatile_store
(
val
,
ptr
)
}
pub
fn
AtomicStore
(
cx
:
Block
,
val
:
ValueRef
,
ptr
:
ValueRef
,
order
:
AtomicOrdering
)
{
if
cx
.unreachable
.get
()
{
return
;
}
B
(
cx
)
.atomic_store
(
val
,
ptr
,
order
)
/// Builds an atomic `store` of `val` into `ptr` with the given memory
/// ordering, via the block's builder.
pub fn AtomicStore(cx: &BlockAndBuilder,
                   val: ValueRef,
                   ptr: ValueRef,
                   order: AtomicOrdering) {
    cx.atomic_store(val, ptr, order)
}
pub
fn
GEP
(
cx
:
Block
,
pointer
:
ValueRef
,
indices
:
&
[
ValueRef
])
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.ptr_to
()
.to_ref
());
}
B
(
cx
)
.gep
(
pointer
,
indices
)
}
/// Builds a `getelementptr` from `pointer` with the given index values,
/// via the block's builder.
pub fn GEP(cx: &BlockAndBuilder, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef {
    cx.gep(pointer, indices)
}
// Simple wrapper around GEP that takes an array of ints and wraps them
// in C_i32()
#[inline]
pub
fn
GEPi
(
cx
:
Block
,
base
:
ValueRef
,
ixs
:
&
[
usize
])
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.ptr_to
()
.to_ref
());
}
B
(
cx
)
.gepi
(
base
,
ixs
)
}
/// Builds a `getelementptr` from `base` using plain `usize` indices
/// (converted to constants by the builder's `gepi`).
pub fn GEPi(cx: &BlockAndBuilder, base: ValueRef, ixs: &[usize]) -> ValueRef {
    cx.gepi(base, ixs)
}
pub
fn
InBoundsGEP
(
cx
:
Block
,
pointer
:
ValueRef
,
indices
:
&
[
ValueRef
])
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.ptr_to
()
.to_ref
());
}
B
(
cx
)
.inbounds_gep
(
pointer
,
indices
)
}
pub
fn
InBoundsGEP
(
cx
:
&
BlockAndBuilder
,
pointer
:
ValueRef
,
indices
:
&
[
ValueRef
])
->
ValueRef
{
cx
.inbounds_gep
(
pointer
,
indices
)
}
pub
fn
StructGEP
(
cx
:
Block
,
pointer
:
ValueRef
,
idx
:
usize
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.ptr_to
()
.to_ref
());
}
B
(
cx
)
.struct_gep
(
pointer
,
idx
)
}
/// Builds a struct-field `getelementptr` for field `idx` of the struct
/// behind `pointer`, via the block's builder.
pub fn StructGEP(cx: &BlockAndBuilder, pointer: ValueRef, idx: usize) -> ValueRef {
    cx.struct_gep(pointer, idx)
}
pub
fn
GlobalString
(
cx
:
Block
,
_
str
:
*
const
c_char
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
i8p
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.global_string
(
_
str
)
}
pub
fn
GlobalString
(
cx
:
&
BlockAndBuilder
,
_
str
:
*
const
c_char
)
->
ValueRef
{
cx
.global_string
(
_
str
)
}
pub
fn
GlobalStringPtr
(
cx
:
Block
,
_
str
:
*
const
c_char
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
i8p
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.global_string_ptr
(
_
str
)
}
pub
fn
GlobalStringPtr
(
cx
:
&
BlockAndBuilder
,
_
str
:
*
const
c_char
)
->
ValueRef
{
cx
.global_string_ptr
(
_
str
)
}
/* Casts */
pub
fn
Trunc
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.trunc
(
val
,
dest_ty
)
}
/// Builds a `trunc` of `val` to `dest_ty` via the block's builder.
pub fn Trunc(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
    cx.trunc(val, dest_ty)
}
pub
fn
ZExt
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.zext
(
val
,
dest_ty
)
}
pub
fn
ZExt
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.zext
(
val
,
dest_ty
)
}
pub
fn
SExt
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.sext
(
val
,
dest_ty
)
}
pub
fn
SExt
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.sext
(
val
,
dest_ty
)
}
pub
fn
FPToUI
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.fptoui
(
val
,
dest_ty
)
}
pub
fn
FPToUI
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.fptoui
(
val
,
dest_ty
)
}
pub
fn
FPToSI
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.fptosi
(
val
,
dest_ty
)
}
pub
fn
FPToSI
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.fptosi
(
val
,
dest_ty
)
}
pub
fn
UIToFP
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.uitofp
(
val
,
dest_ty
)
}
pub
fn
UIToFP
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.uitofp
(
val
,
dest_ty
)
}
pub
fn
SIToFP
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.sitofp
(
val
,
dest_ty
)
}
pub
fn
SIToFP
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.sitofp
(
val
,
dest_ty
)
}
pub
fn
FPTrunc
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.fptrunc
(
val
,
dest_ty
)
}
pub
fn
FPTrunc
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.fptrunc
(
val
,
dest_ty
)
}
pub
fn
FPExt
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.fpext
(
val
,
dest_ty
)
}
pub
fn
FPExt
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.fpext
(
val
,
dest_ty
)
}
pub
fn
PtrToInt
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.ptrtoint
(
val
,
dest_ty
)
}
pub
fn
PtrToInt
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.ptrtoint
(
val
,
dest_ty
)
}
pub
fn
IntToPtr
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.inttoptr
(
val
,
dest_ty
)
}
pub
fn
IntToPtr
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.inttoptr
(
val
,
dest_ty
)
}
pub
fn
BitCast
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.bitcast
(
val
,
dest_ty
)
}
/// Builds a `bitcast` of `val` to `dest_ty` via the block's builder.
pub fn BitCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
    cx.bitcast(val, dest_ty)
}
pub
fn
ZExtOrBitCast
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.zext_or_bitcast
(
val
,
dest_ty
)
}
pub
fn
ZExtOrBitCast
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.zext_or_bitcast
(
val
,
dest_ty
)
}
pub
fn
SExtOrBitCast
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.sext_or_bitcast
(
val
,
dest_ty
)
}
pub
fn
SExtOrBitCast
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.sext_or_bitcast
(
val
,
dest_ty
)
}
pub
fn
TruncOrBitCast
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.trunc_or_bitcast
(
val
,
dest_ty
)
}
pub
fn
TruncOrBitCast
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.trunc_or_bitcast
(
val
,
dest_ty
)
}
pub
fn
Cast
(
cx
:
Block
,
op
:
Opcode
,
val
:
ValueRef
,
dest_ty
:
Type
,
_
:
*
const
u8
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.cast
(
op
,
val
,
dest_ty
)
pub
fn
Cast
(
cx
:
&
BlockAndBuilder
,
op
:
Opcode
,
val
:
ValueRef
,
dest_ty
:
Type
,
_
:
*
const
u8
)
->
ValueRef
{
cx
.cast
(
op
,
val
,
dest_ty
)
}
}
pub
fn
PointerCast
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.pointercast
(
val
,
dest_ty
)
}
/// Builds a pointer cast of `val` to `dest_ty` via the block's builder.
pub fn PointerCast(cx: &BlockAndBuilder, val: ValueRef, dest_ty: Type) -> ValueRef {
    cx.pointercast(val, dest_ty)
}
pub
fn
IntCast
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.intcast
(
val
,
dest_ty
)
}
pub
fn
IntCast
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.intcast
(
val
,
dest_ty
)
}
pub
fn
FPCast
(
cx
:
Block
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
dest_ty
.to_ref
());
}
B
(
cx
)
.fpcast
(
val
,
dest_ty
)
}
pub
fn
FPCast
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
dest_ty
:
Type
)
->
ValueRef
{
cx
.fpcast
(
val
,
dest_ty
)
}
/* Comparisons */
pub
fn
ICmp
(
cx
:
Block
,
op
:
IntPredicate
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
i1
(
cx
.ccx
())
.to_ref
());
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.icmp
(
op
,
lhs
,
rhs
)
/// Builds an integer comparison of `lhs` and `rhs` under the predicate
/// `op`, applying `debug_loc` to the function context first.
pub fn ICmp(cx: &BlockAndBuilder,
            op: IntPredicate,
            lhs: ValueRef,
            rhs: ValueRef,
            debug_loc: DebugLoc)
            -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.icmp(op, lhs, rhs)
}
}
pub
fn
FCmp
(
cx
:
Block
,
op
:
RealPredicate
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
i1
(
cx
.ccx
())
.to_ref
());
}
debug_loc
.apply
(
cx
.fcx
);
B
(
cx
)
.fcmp
(
op
,
lhs
,
rhs
)
/// Builds a floating-point comparison of `lhs` and `rhs` under the
/// predicate `op`, applying `debug_loc` to the function context first.
pub fn FCmp(cx: &BlockAndBuilder,
            op: RealPredicate,
            lhs: ValueRef,
            rhs: ValueRef,
            debug_loc: DebugLoc)
            -> ValueRef {
    debug_loc.apply(cx.fcx());
    cx.fcmp(op, lhs, rhs)
}
}
/* Miscellaneous instructions */
pub
fn
EmptyPhi
(
cx
:
Block
,
ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
ty
.to_ref
());
}
B
(
cx
)
.empty_phi
(
ty
)
}
/// Builds a `phi` node of type `ty` with no incoming edges yet, via the
/// block's builder.
pub fn EmptyPhi(cx: &BlockAndBuilder, ty: Type) -> ValueRef {
    cx.empty_phi(ty)
}
pub
fn
Phi
(
cx
:
Block
,
ty
:
Type
,
vals
:
&
[
ValueRef
],
bbs
:
&
[
BasicBlockRef
])
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
ty
.to_ref
());
}
B
(
cx
)
.phi
(
ty
,
vals
,
bbs
)
}
/// Builds a `phi` node of type `ty` whose incoming values are `vals`,
/// each paired positionally with its predecessor block in `bbs`.
pub fn Phi(cx: &BlockAndBuilder,
           ty: Type,
           vals: &[ValueRef],
           bbs: &[BasicBlockRef])
           -> ValueRef {
    cx.phi(ty, vals, bbs)
}
pub
fn
AddIncomingToPhi
(
phi
:
ValueRef
,
val
:
ValueRef
,
bb
:
BasicBlockRef
)
{
...
...
@@ -938,230 +606,151 @@ pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
}
}
pub
fn
_
UndefReturn
(
cx
:
Block
,
fn_
:
ValueRef
)
->
ValueRef
{
unsafe
{
let
ccx
=
cx
.fcx.ccx
;
let
ty
=
val_ty
(
fn_
);
let
retty
=
if
ty
.kind
()
==
llvm
::
Function
{
ty
.return_type
()
}
else
{
ccx
.int_type
()
};
B
(
cx
)
.count_insn
(
"ret_undef"
);
llvm
::
LLVMGetUndef
(
retty
.to_ref
())
}
}
pub
fn
add_span_comment
(
cx
:
Block
,
sp
:
Span
,
text
:
&
str
)
{
B
(
cx
)
.add_span_comment
(
sp
,
text
)
pub
fn
add_span_comment
(
cx
:
&
BlockAndBuilder
,
sp
:
Span
,
text
:
&
str
)
{
cx
.add_span_comment
(
sp
,
text
)
}
pub
fn
add_comment
(
cx
:
Block
,
text
:
&
str
)
{
B
(
cx
)
.add_comment
(
text
)
pub
fn
add_comment
(
cx
:
&
BlockAndBuilder
,
text
:
&
str
)
{
cx
.add_comment
(
text
)
}
pub
fn
InlineAsmCall
(
cx
:
Block
,
asm
:
*
const
c_char
,
cons
:
*
const
c_char
,
inputs
:
&
[
ValueRef
],
output
:
Type
,
volatile
:
bool
,
alignstack
:
bool
,
dia
:
AsmDialect
)
->
ValueRef
{
B
(
cx
)
.inline_asm_call
(
asm
,
cons
,
inputs
,
output
,
volatile
,
alignstack
,
dia
)
pub
fn
InlineAsmCall
(
cx
:
&
BlockAndBuilder
,
asm
:
*
const
c_char
,
cons
:
*
const
c_char
,
inputs
:
&
[
ValueRef
],
output
:
Type
,
volatile
:
bool
,
alignstack
:
bool
,
dia
:
AsmDialect
)
->
ValueRef
{
cx
.inline_asm_call
(
asm
,
cons
,
inputs
,
output
,
volatile
,
alignstack
,
dia
)
}
pub
fn
Call
(
cx
:
Block
,
fn_
:
ValueRef
,
args
:
&
[
ValueRef
],
debug_loc
:
DebugLoc
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
UndefReturn
(
cx
,
fn_
);
pub
fn
Call
(
cx
:
&
BlockAndBuilder
,
fn_
:
ValueRef
,
args
:
&
[
ValueRef
],
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
let
bundle
=
cx
.lpad
()
.and_then
(|
b
|
b
.bundle
());
cx
.call
(
fn_
,
args
,
bundle
)
}
debug_loc
.apply
(
cx
.fcx
);
let
bundle
=
cx
.lpad
.get
()
.and_then
(|
b
|
b
.bundle
());
B
(
cx
)
.call
(
fn_
,
args
,
bundle
)
}
pub
fn
AtomicFence
(
cx
:
Block
,
order
:
AtomicOrdering
,
scope
:
SynchronizationScope
)
{
if
cx
.unreachable
.get
()
{
return
;
}
B
(
cx
)
.atomic_fence
(
order
,
scope
)
}
pub
fn
Select
(
cx
:
Block
,
if_
:
ValueRef
,
then
:
ValueRef
,
else_
:
ValueRef
)
->
ValueRef
{
if
cx
.unreachable
.get
()
{
return
_
Undef
(
then
);
}
B
(
cx
)
.select
(
if_
,
then
,
else_
)
pub
fn
AtomicFence
(
cx
:
&
BlockAndBuilder
,
order
:
AtomicOrdering
,
scope
:
SynchronizationScope
)
{
cx
.atomic_fence
(
order
,
scope
)
}
pub
fn
VAArg
(
cx
:
Block
,
list
:
ValueRef
,
ty
:
Type
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
ty
.to_ref
());
}
B
(
cx
)
.va_arg
(
list
,
ty
)
}
pub
fn
Select
(
cx
:
&
BlockAndBuilder
,
if_
:
ValueRef
,
then
:
ValueRef
,
else_
:
ValueRef
)
->
ValueRef
{
cx
.select
(
if_
,
then
,
else_
)
}
pub
fn
ExtractElement
(
cx
:
Block
,
vec_val
:
ValueRef
,
index
:
ValueRef
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.extract_element
(
vec_val
,
index
)
}
pub
fn
VAArg
(
cx
:
&
BlockAndBuilder
,
list
:
ValueRef
,
ty
:
Type
)
->
ValueRef
{
cx
.va_arg
(
list
,
ty
)
}
pub
fn
InsertElement
(
cx
:
Block
,
vec_val
:
ValueRef
,
elt_val
:
ValueRef
,
index
:
ValueRef
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.insert_element
(
vec_val
,
elt_val
,
index
)
}
pub
fn
ExtractElement
(
cx
:
&
BlockAndBuilder
,
vec_val
:
ValueRef
,
index
:
ValueRef
)
->
ValueRef
{
cx
.extract_element
(
vec_val
,
index
)
}
pub
fn
ShuffleVector
(
cx
:
Block
,
v1
:
ValueRef
,
v2
:
ValueRef
,
mask
:
ValueRef
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.shuffle_vector
(
v1
,
v2
,
mask
)
}
pub
fn
InsertElement
(
cx
:
&
BlockAndBuilder
,
vec_val
:
ValueRef
,
elt_val
:
ValueRef
,
index
:
ValueRef
)
->
ValueRef
{
cx
.insert_element
(
vec_val
,
elt_val
,
index
)
}
pub
fn
VectorSplat
(
cx
:
Block
,
num_elts
:
usize
,
elt_val
:
ValueRef
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.vector_splat
(
num_elts
,
elt_val
)
}
pub
fn
ShuffleVector
(
cx
:
&
BlockAndBuilder
,
v1
:
ValueRef
,
v2
:
ValueRef
,
mask
:
ValueRef
)
->
ValueRef
{
cx
.shuffle_vector
(
v1
,
v2
,
mask
)
}
pub
fn
ExtractValue
(
cx
:
Block
,
agg_val
:
ValueRef
,
index
:
usize
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.extract_value
(
agg_val
,
index
)
}
pub
fn
VectorSplat
(
cx
:
&
BlockAndBuilder
,
num_elts
:
usize
,
elt_val
:
ValueRef
)
->
ValueRef
{
cx
.vector_splat
(
num_elts
,
elt_val
)
}
pub
fn
InsertValue
(
cx
:
Block
,
agg_val
:
ValueRef
,
elt_val
:
ValueRef
,
index
:
usize
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
nil
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.insert_value
(
agg_val
,
elt_val
,
index
)
}
pub
fn
ExtractValue
(
cx
:
&
BlockAndBuilder
,
agg_val
:
ValueRef
,
index
:
usize
)
->
ValueRef
{
cx
.extract_value
(
agg_val
,
index
)
}
pub
fn
IsNull
(
cx
:
Block
,
val
:
ValueRef
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
i1
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.is_null
(
val
)
}
pub
fn
InsertValue
(
cx
:
&
BlockAndBuilder
,
agg_val
:
ValueRef
,
elt_val
:
ValueRef
,
index
:
usize
)
->
ValueRef
{
cx
.insert_value
(
agg_val
,
elt_val
,
index
)
}
pub
fn
IsNotNull
(
cx
:
Block
,
val
:
ValueRef
)
->
ValueRef
{
unsafe
{
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
Type
::
i1
(
cx
.ccx
())
.to_ref
());
}
B
(
cx
)
.is_not_null
(
val
)
}
pub
fn
IsNull
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
)
->
ValueRef
{
cx
.is_null
(
val
)
}
pub
fn
PtrDiff
(
cx
:
Block
,
lhs
:
ValueRef
,
rhs
:
ValueRef
)
->
ValueRef
{
unsafe
{
let
ccx
=
cx
.fcx.ccx
;
if
cx
.unreachable
.get
()
{
return
llvm
::
LLVMGetUndef
(
ccx
.int_type
()
.to_ref
());
}
B
(
cx
)
.ptrdiff
(
lhs
,
rhs
)
}
pub
fn
IsNotNull
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
)
->
ValueRef
{
cx
.is_not_null
(
val
)
}
pub
fn
Trap
(
cx
:
Block
)
{
if
cx
.unreachable
.get
()
{
return
;
}
B
(
cx
)
.trap
();
pub
fn
PtrDiff
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
)
->
ValueRef
{
cx
.ptrdiff
(
lhs
,
rhs
)
}
pub
fn
LandingPad
(
cx
:
Block
,
ty
:
Type
,
pers_fn
:
ValueRef
,
num_clauses
:
usize
)
->
ValueRef
{
check_not_terminated
(
cx
);
assert
!
(
!
cx
.unreachable
.get
());
B
(
cx
)
.landing_pad
(
ty
,
pers_fn
,
num_clauses
,
cx
.fcx.llfn
)
pub
fn
Trap
(
cx
:
&
BlockAndBuilder
)
{
cx
.trap
();
}
pub
fn
AddClause
(
cx
:
Block
,
landing_pad
:
ValueRef
,
clause
:
ValueRef
)
{
B
(
cx
)
.add_clause
(
landing_pad
,
clause
)
pub
fn
LandingPad
(
cx
:
&
BlockAndBuilder
,
ty
:
Type
,
pers_fn
:
ValueRef
,
num_clauses
:
usize
)
->
ValueRef
{
assert
!
(
!
cx
.is_unreachable
());
cx
.landing_pad
(
ty
,
pers_fn
,
num_clauses
,
cx
.fcx
()
.llfn
)
}
pub
fn
SetCleanup
(
cx
:
Block
,
landing_pad
:
ValueRef
)
{
B
(
cx
)
.set_cleanup
(
landing_pad
)
pub
fn
AddClause
(
cx
:
&
BlockAndBuilder
,
landing_pad
:
ValueRef
,
clause
:
ValueRef
)
{
cx
.add_clause
(
landing_pad
,
clause
)
}
pub
fn
Set
PersonalityFn
(
cx
:
Block
,
f
:
ValueRef
)
{
B
(
cx
)
.set_personality_fn
(
f
)
pub
fn
Set
Cleanup
(
cx
:
&
BlockAndBuilder
,
landing_pad
:
ValueRef
)
{
cx
.set_cleanup
(
landing_pad
)
}
pub
fn
Resume
(
cx
:
Block
,
exn
:
ValueRef
)
->
ValueRef
{
check_not_terminated
(
cx
);
terminate
(
cx
,
"Resume"
);
B
(
cx
)
.resume
(
exn
)
pub
fn
SetPersonalityFn
(
cx
:
&
BlockAndBuilder
,
f
:
ValueRef
)
{
cx
.set_personality_fn
(
f
)
}
// Atomic Operations
pub
fn
AtomicCmpXchg
(
cx
:
Block
,
dst
:
ValueRef
,
cmp
:
ValueRef
,
src
:
ValueRef
,
order
:
AtomicOrdering
,
failure_order
:
AtomicOrdering
,
weak
:
llvm
::
Bool
)
->
ValueRef
{
B
(
cx
)
.atomic_cmpxchg
(
dst
,
cmp
,
src
,
order
,
failure_order
,
weak
)
pub
fn
AtomicCmpXchg
(
cx
:
&
BlockAndBuilder
,
dst
:
ValueRef
,
cmp
:
ValueRef
,
src
:
ValueRef
,
order
:
AtomicOrdering
,
failure_order
:
AtomicOrdering
,
weak
:
llvm
::
Bool
)
->
ValueRef
{
cx
.atomic_cmpxchg
(
dst
,
cmp
,
src
,
order
,
failure_order
,
weak
)
}
pub
fn
AtomicRMW
(
cx
:
Block
,
op
:
AtomicRmwBinOp
,
dst
:
ValueRef
,
src
:
ValueRef
,
order
:
AtomicOrdering
)
->
ValueRef
{
B
(
cx
)
.atomic_rmw
(
op
,
dst
,
src
,
order
)
pub
fn
AtomicRMW
(
cx
:
&
BlockAndBuilder
,
op
:
AtomicRmwBinOp
,
dst
:
ValueRef
,
src
:
ValueRef
,
order
:
AtomicOrdering
)
->
ValueRef
{
cx
.atomic_rmw
(
op
,
dst
,
src
,
order
)
}
pub
fn
CleanupPad
(
cx
:
Block
,
parent
:
Option
<
ValueRef
>
,
args
:
&
[
ValueRef
])
->
ValueRef
{
check_not_terminated
(
cx
);
assert
!
(
!
cx
.unreachable
.get
());
B
(
cx
)
.cleanup_pad
(
parent
,
args
)
pub
fn
CleanupPad
(
cx
:
&
BlockAndBuilder
,
parent
:
Option
<
ValueRef
>
,
args
:
&
[
ValueRef
])
->
ValueRef
{
assert
!
(
!
cx
.is_unreachable
());
cx
.cleanup_pad
(
parent
,
args
)
}
pub
fn
CleanupRet
(
cx
:
Block
,
cleanup
:
ValueRef
,
unwind
:
Option
<
BasicBlockRef
>
)
->
ValueRef
{
check_not_terminated
(
cx
);
terminate
(
cx
,
"CleanupRet"
);
B
(
cx
)
.cleanup_ret
(
cleanup
,
unwind
)
pub
fn
CleanupRet
(
cx
:
&
BlockAndBuilder
,
cleanup
:
ValueRef
,
unwind
:
Option
<
BasicBlockRef
>
)
->
ValueRef
{
cx
.terminate
();
cx
.cleanup_ret
(
cleanup
,
unwind
)
}
pub
fn
CatchPad
(
cx
:
Block
,
parent
:
ValueRef
,
args
:
&
[
ValueRef
])
->
ValueRef
{
check_not_terminated
(
cx
);
assert
!
(
!
cx
.unreachable
.get
());
B
(
cx
)
.catch_pad
(
parent
,
args
)
pub
fn
CatchPad
(
cx
:
&
BlockAndBuilder
,
parent
:
ValueRef
,
args
:
&
[
ValueRef
])
->
ValueRef
{
assert
!
(
!
cx
.is_unreachable
());
cx
.catch_pad
(
parent
,
args
)
}
pub
fn
CatchRet
(
cx
:
Block
,
pad
:
ValueRef
,
unwind
:
BasicBlockRef
)
->
ValueRef
{
check_not_terminated
(
cx
);
terminate
(
cx
,
"CatchRet"
);
B
(
cx
)
.catch_ret
(
pad
,
unwind
)
pub
fn
CatchRet
(
cx
:
&
BlockAndBuilder
,
pad
:
ValueRef
,
unwind
:
BasicBlockRef
)
->
ValueRef
{
cx
.terminate
();
cx
.catch_ret
(
pad
,
unwind
)
}
pub
fn
CatchSwitch
(
cx
:
Block
,
parent
:
Option
<
ValueRef
>
,
unwind
:
Option
<
BasicBlockRef
>
,
num_handlers
:
usize
)
->
ValueRef
{
check_not_terminated
(
cx
);
terminate
(
cx
,
"CatchSwitch"
);
B
(
cx
)
.catch_switch
(
parent
,
unwind
,
num_handlers
)
pub
fn
CatchSwitch
(
cx
:
&
BlockAndBuilder
,
parent
:
Option
<
ValueRef
>
,
unwind
:
Option
<
BasicBlockRef
>
,
num_handlers
:
usize
)
->
ValueRef
{
cx
.terminate
();
cx
.catch_switch
(
parent
,
unwind
,
num_handlers
)
}
pub
fn
AddHandler
(
cx
:
Block
,
catch_switch
:
ValueRef
,
handler
:
BasicBlockRef
)
{
B
(
cx
)
.add_handler
(
catch_switch
,
handler
)
pub
fn
AddHandler
(
cx
:
&
BlockAndBuilder
,
catch_switch
:
ValueRef
,
handler
:
BasicBlockRef
)
{
cx
.add_handler
(
catch_switch
,
handler
)
}
src/librustc_trans/callee.rs
浏览文件 @
bf7d4534
...
...
@@ -26,7 +26,9 @@
use
base
;
use
base
::
*
;
use
build
::
*
;
use
common
::{
self
,
Block
,
Result
,
CrateContext
,
FunctionContext
,
SharedCrateContext
};
use
common
::{
self
,
Block
,
BlockAndBuilder
,
CrateContext
,
FunctionContext
,
SharedCrateContext
};
use
consts
;
use
debuginfo
::
DebugLoc
;
use
declare
;
...
...
@@ -207,11 +209,11 @@ pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
/// For non-lang items, `dest` is always Some, and hence the result is written
/// into memory somewhere. Nonetheless we return the actual return value of the
/// function.
pub
fn
call
<
'a
,
'blk
>
(
self
,
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
call
<
'a
,
'blk
>
(
self
,
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
debug_loc
:
DebugLoc
,
args
:
&
[
ValueRef
],
dest
:
Option
<
ValueRef
>
)
->
Result
<
'blk
,
'tcx
>
{
->
(
BlockAndBuilder
<
'blk
,
'tcx
>
,
ValueRef
)
{
trans_call_inner
(
bcx
,
debug_loc
,
self
,
args
,
dest
)
}
...
...
@@ -370,8 +372,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let
(
block_arena
,
fcx
):
(
TypedArena
<
_
>
,
FunctionContext
);
block_arena
=
TypedArena
::
new
();
fcx
=
FunctionContext
::
new
(
ccx
,
lloncefn
,
fn_ty
,
None
,
&
block_arena
);
let
mut
bcx
=
fcx
.init
(
false
);
let
bcx
=
fcx
.init
(
false
);
// the first argument (`self`) will be the (by value) closure env.
...
...
@@ -381,9 +382,9 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let
llenv
=
if
env_arg
.is_indirect
()
{
llargs
[
self_idx
]
}
else
{
let
scratch
=
alloc_ty
(
bcx
,
closure_ty
,
"self"
);
let
scratch
=
alloc_ty
(
&
bcx
,
closure_ty
,
"self"
);
let
mut
llarg_idx
=
self_idx
;
env_arg
.store_fn_arg
(
&
bcx
.build
()
,
&
mut
llarg_idx
,
scratch
);
env_arg
.store_fn_arg
(
&
bcx
,
&
mut
llarg_idx
,
scratch
);
scratch
};
...
...
@@ -413,11 +414,11 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let
self_scope
=
fcx
.push_custom_cleanup_scope
();
fcx
.schedule_drop_mem
(
self_scope
,
llenv
,
closure_ty
);
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[
self_idx
..
],
dest
)
.bcx
;
let
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[
self_idx
..
],
dest
)
.0
;
fcx
.pop_and_trans_custom_cleanup_scope
(
bcx
,
self_scope
);
let
bcx
=
fcx
.pop_and_trans_custom_cleanup_scope
(
bcx
,
self_scope
);
fcx
.finish
(
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
ccx
.instances
()
.borrow_mut
()
.insert
(
method_instance
,
lloncefn
);
...
...
@@ -522,7 +523,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
let
(
block_arena
,
fcx
):
(
TypedArena
<
_
>
,
FunctionContext
);
block_arena
=
TypedArena
::
new
();
fcx
=
FunctionContext
::
new
(
ccx
,
llfn
,
fn_ty
,
None
,
&
block_arena
);
let
mut
bcx
=
fcx
.init
(
false
);
let
bcx
=
fcx
.init
(
false
);
let
llargs
=
get_params
(
fcx
.llfn
);
...
...
@@ -530,7 +531,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
let
llfnpointer
=
llfnpointer
.unwrap_or_else
(||
{
// the first argument (`self`) will be ptr to the fn pointer
if
is_by_ref
{
Load
(
bcx
,
llargs
[
self_idx
])
Load
(
&
bcx
,
llargs
[
self_idx
])
}
else
{
llargs
[
self_idx
]
}
...
...
@@ -542,9 +543,8 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
data
:
Fn
(
llfnpointer
),
ty
:
bare_fn_ty
};
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[(
self_idx
+
1
)
..
],
dest
)
.bcx
;
fcx
.finish
(
bcx
,
DebugLoc
::
None
);
let
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[(
self_idx
+
1
)
..
],
dest
)
.0
;
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
ccx
.fn_pointer_shims
()
.borrow_mut
()
.insert
(
bare_fn_ty_maybe_ref
,
llfn
);
...
...
@@ -653,12 +653,12 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// ______________________________________________________________________
// Translating calls
fn
trans_call_inner
<
'a
,
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
trans_call_inner
<
'a
,
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
debug_loc
:
DebugLoc
,
callee
:
Callee
<
'tcx
>
,
args
:
&
[
ValueRef
],
opt_llretslot
:
Option
<
ValueRef
>
)
->
Result
<
'blk
,
'tcx
>
{
->
(
BlockAndBuilder
<
'blk
,
'tcx
>
,
ValueRef
)
{
// Introduce a temporary cleanup scope that will contain cleanups
// for the arguments while they are being evaluated. The purpose
// this cleanup is to ensure that, should a panic occur while
...
...
@@ -666,7 +666,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// cleaned up. If no panic occurs, the values are handed off to
// the callee, and hence none of the cleanups in this temporary
// scope will ever execute.
let
fcx
=
bcx
.fcx
;
let
fcx
=
&
bcx
.fcx
()
;
let
ccx
=
fcx
.ccx
;
let
fn_ret
=
callee
.ty
.fn_ret
();
...
...
@@ -689,7 +689,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if
fn_ty
.ret
.is_indirect
()
{
let
mut
llretslot
=
opt_llretslot
.unwrap
();
if
let
Some
(
ty
)
=
fn_ty
.ret.cast
{
llretslot
=
PointerCast
(
bcx
,
llretslot
,
ty
.ptr_to
());
llretslot
=
PointerCast
(
&
bcx
,
llretslot
,
ty
.ptr_to
());
}
llargs
.push
(
llretslot
);
}
...
...
@@ -698,9 +698,9 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
Virtual
(
idx
)
=>
{
llargs
.push
(
args
[
0
]);
let
fn_ptr
=
meth
::
get_virtual_method
(
bcx
,
args
[
1
],
idx
);
let
llty
=
fn_ty
.llvm_type
(
bcx
.ccx
())
.ptr_to
();
callee
=
Fn
(
PointerCast
(
bcx
,
fn_ptr
,
llty
));
let
fn_ptr
=
meth
::
get_virtual_method
(
&
bcx
,
args
[
1
],
idx
);
let
llty
=
fn_ty
.llvm_type
(
&
bcx
.ccx
())
.ptr_to
();
callee
=
Fn
(
PointerCast
(
&
bcx
,
fn_ptr
,
llty
));
llargs
.extend_from_slice
(
&
args
[
2
..
]);
}
_
=>
llargs
.extend_from_slice
(
args
)
...
...
@@ -712,7 +712,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
};
let
(
llret
,
bcx
)
=
base
::
invoke
(
bcx
,
llfn
,
&
llargs
,
debug_loc
);
if
!
bcx
.
unreachable
.get
()
{
if
!
bcx
.
is_unreachable
()
{
fn_ty
.apply_attrs_callsite
(
llret
);
// If the function we just called does not use an outpointer,
...
...
@@ -722,14 +722,16 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// u64.
if
!
fn_ty
.ret
.is_indirect
()
{
if
let
Some
(
llretslot
)
=
opt_llretslot
{
fn_ty
.ret
.store
(
&
bcx
.build
()
,
llret
,
llretslot
);
fn_ty
.ret
.store
(
&
bcx
,
llret
,
llretslot
);
}
}
}
if
fn_ret
.0
.is_never
()
{
Unreachable
(
bcx
);
assert
!
(
!
bcx
.is_terminated
());
bcx
.set_unreachable
();
bcx
.unreachable
();
}
Result
::
new
(
bcx
,
llret
)
(
bcx
,
llret
)
}
src/librustc_trans/cleanup.rs
浏览文件 @
bf7d4534
...
...
@@ -120,7 +120,7 @@
use
base
;
use
build
;
use
common
;
use
common
::{
Block
,
FunctionContext
,
LandingPad
};
use
common
::{
Block
AndBuilder
,
FunctionContext
,
LandingPad
};
use
debuginfo
::{
DebugLoc
};
use
glue
;
use
type_
::
Type
;
...
...
@@ -190,9 +190,9 @@ pub fn pop_custom_cleanup_scope(&self,
/// Removes the top cleanup scope from the stack, which must be a temporary scope, and
/// generates the code to do its cleanups for normal exit.
pub
fn
pop_and_trans_custom_cleanup_scope
(
&
self
,
bcx
:
Block
<
'blk
,
'tcx
>
,
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
custom_scope
:
CustomScopeIndex
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
debug!
(
"pop_and_trans_custom_cleanup_scope({:?})"
,
custom_scope
);
assert
!
(
self
.is_valid_to_pop_custom_scope
(
custom_scope
));
...
...
@@ -339,11 +339,11 @@ fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
/// Generates the cleanups for `scope` into `bcx`
fn
trans_scope_cleanups
(
&
self
,
// cannot borrow self, will recurse
bcx
:
Block
<
'blk
,
'tcx
>
,
scope
:
&
CleanupScope
<
'tcx
>
)
->
Block
<
'blk
,
'tcx
>
{
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
scope
:
&
CleanupScope
<
'tcx
>
)
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
mut
bcx
=
bcx
;
if
!
bcx
.
unreachable
.get
()
{
if
!
bcx
.
is_unreachable
()
{
for
cleanup
in
scope
.cleanups
.iter
()
.rev
()
{
bcx
=
cleanup
.trans
(
bcx
,
scope
.debug_loc
);
}
...
...
@@ -419,21 +419,21 @@ fn trans_cleanups_to_exit_scope(&'blk self,
UnwindExit
(
val
)
=>
{
// Generate a block that will resume unwinding to the
// calling function
let
bcx
=
self
.new_block
(
"resume"
);
let
bcx
=
self
.new_block
(
"resume"
)
.build
()
;
match
val
{
UnwindKind
::
LandingPad
=>
{
let
addr
=
self
.landingpad_alloca
.get
()
.unwrap
();
let
lp
=
build
::
Load
(
bcx
,
addr
);
base
::
call_lifetime_end
(
bcx
,
addr
);
base
::
trans_unwind_resume
(
bcx
,
lp
);
let
lp
=
build
::
Load
(
&
bcx
,
addr
);
base
::
call_lifetime_end
(
&
bcx
,
addr
);
base
::
trans_unwind_resume
(
&
bcx
,
lp
);
}
UnwindKind
::
CleanupPad
(
_
)
=>
{
let
pad
=
build
::
CleanupPad
(
bcx
,
None
,
&
[]);
build
::
CleanupRet
(
bcx
,
pad
,
None
);
let
pad
=
build
::
CleanupPad
(
&
bcx
,
None
,
&
[]);
build
::
CleanupRet
(
&
bcx
,
pad
,
None
);
}
}
prev_llbb
=
bcx
.llbb
;
prev_llbb
=
bcx
.llbb
()
;
break
;
}
}
...
...
@@ -484,16 +484,17 @@ fn trans_cleanups_to_exit_scope(&'blk self,
let
name
=
scope
.block_name
(
"clean"
);
debug!
(
"generating cleanups for {}"
,
name
);
let
bcx_in
=
self
.new_block
(
&
name
[
..
]);
let
exit_label
=
label
.start
(
bcx_in
);
let
bcx_in
=
self
.new_block
(
&
name
[
..
])
.build
();
let
exit_label
=
label
.start
(
&
bcx_in
);
let
next_llbb
=
bcx_in
.llbb
();
let
mut
bcx_out
=
bcx_in
;
let
len
=
scope
.cleanups
.len
();
for
cleanup
in
scope
.cleanups
.iter
()
.rev
()
.take
(
len
-
skip
)
{
bcx_out
=
cleanup
.trans
(
bcx_out
,
scope
.debug_loc
);
}
skip
=
0
;
exit_label
.branch
(
bcx_out
,
prev_llbb
);
prev_llbb
=
bcx_in
.
llbb
;
exit_label
.branch
(
&
bcx_out
,
prev_llbb
);
prev_llbb
=
next_
llbb
;
scope
.add_cached_early_exit
(
exit_label
,
prev_llbb
,
len
);
}
...
...
@@ -527,13 +528,13 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
Some
(
llbb
)
=>
return
llbb
,
None
=>
{
let
name
=
last_scope
.block_name
(
"unwind"
);
pad_bcx
=
self
.new_block
(
&
name
[
..
]);
last_scope
.cached_landing_pad
=
Some
(
pad_bcx
.llbb
);
pad_bcx
=
self
.new_block
(
&
name
[
..
])
.build
()
;
last_scope
.cached_landing_pad
=
Some
(
pad_bcx
.llbb
()
);
}
}
};
let
llpersonality
=
pad_bcx
.fcx
.eh_personality
();
let
llpersonality
=
pad_bcx
.fcx
()
.eh_personality
();
let
val
=
if
base
::
wants_msvc_seh
(
self
.ccx
.sess
())
{
// A cleanup pad requires a personality function to be specified, so
...
...
@@ -541,8 +542,8 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
// creation of the landingpad instruction). We then create a
// cleanuppad instruction which has no filters to run cleanup on all
// exceptions.
build
::
SetPersonalityFn
(
pad_bcx
,
llpersonality
);
let
llretval
=
build
::
CleanupPad
(
pad_bcx
,
None
,
&
[]);
build
::
SetPersonalityFn
(
&
pad_bcx
,
llpersonality
);
let
llretval
=
build
::
CleanupPad
(
&
pad_bcx
,
None
,
&
[]);
UnwindKind
::
CleanupPad
(
llretval
)
}
else
{
// The landing pad return type (the type being propagated). Not sure
...
...
@@ -553,31 +554,31 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
false
);
// The only landing pad clause will be 'cleanup'
let
llretval
=
build
::
LandingPad
(
pad_bcx
,
llretty
,
llpersonality
,
1
);
let
llretval
=
build
::
LandingPad
(
&
pad_bcx
,
llretty
,
llpersonality
,
1
);
// The landing pad block is a cleanup
build
::
SetCleanup
(
pad_bcx
,
llretval
);
build
::
SetCleanup
(
&
pad_bcx
,
llretval
);
let
addr
=
match
self
.landingpad_alloca
.get
()
{
Some
(
addr
)
=>
addr
,
None
=>
{
let
addr
=
base
::
alloca
(
pad_bcx
,
common
::
val_ty
(
llretval
),
let
addr
=
base
::
alloca
(
&
pad_bcx
,
common
::
val_ty
(
llretval
),
""
);
base
::
call_lifetime_start
(
pad_bcx
,
addr
);
base
::
call_lifetime_start
(
&
pad_bcx
,
addr
);
self
.landingpad_alloca
.set
(
Some
(
addr
));
addr
}
};
build
::
Store
(
pad_bcx
,
llretval
,
addr
);
build
::
Store
(
&
pad_bcx
,
llretval
,
addr
);
UnwindKind
::
LandingPad
};
// Generate the cleanup block and branch to it.
let
label
=
UnwindExit
(
val
);
let
cleanup_llbb
=
self
.trans_cleanups_to_exit_scope
(
label
);
label
.branch
(
pad_bcx
,
cleanup_llbb
);
label
.branch
(
&
pad_bcx
,
cleanup_llbb
);
return
pad_bcx
.llbb
;
return
pad_bcx
.llbb
()
;
}
}
...
...
@@ -628,7 +629,7 @@ impl EarlyExitLabel {
/// Transitions from an exit label to other exit labels depend on the type
/// of label. For example with MSVC exceptions unwind exit labels will use
/// the `cleanupret` instruction instead of the `br` instruction.
fn
branch
(
&
self
,
from_bcx
:
Block
,
to_llbb
:
BasicBlockRef
)
{
fn
branch
(
&
self
,
from_bcx
:
&
BlockAndBuilder
,
to_llbb
:
BasicBlockRef
)
{
if
let
UnwindExit
(
UnwindKind
::
CleanupPad
(
pad
))
=
*
self
{
build
::
CleanupRet
(
from_bcx
,
pad
,
Some
(
to_llbb
));
}
else
{
...
...
@@ -647,15 +648,15 @@ fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) {
///
/// Returns a new label which will can be used to cache `bcx` in the list of
/// early exits.
fn
start
(
&
self
,
bcx
:
Block
)
->
EarlyExitLabel
{
fn
start
(
&
self
,
bcx
:
&
BlockAndBuilder
)
->
EarlyExitLabel
{
match
*
self
{
UnwindExit
(
UnwindKind
::
CleanupPad
(
..
))
=>
{
let
pad
=
build
::
CleanupPad
(
bcx
,
None
,
&
[]);
bcx
.
lpad
.set
(
Some
(
bcx
.fcx
.lpad_arena
.alloc
(
LandingPad
::
msvc
(
pad
))));
bcx
.
set_lpad_ref
(
Some
(
bcx
.fcx
()
.lpad_arena
.alloc
(
LandingPad
::
msvc
(
pad
))));
UnwindExit
(
UnwindKind
::
CleanupPad
(
pad
))
}
UnwindExit
(
UnwindKind
::
LandingPad
)
=>
{
bcx
.
lpad
.set
(
Some
(
bcx
.fcx
.lpad_arena
.alloc
(
LandingPad
::
gnu
())));
bcx
.
set_lpad_ref
(
Some
(
bcx
.fcx
()
.lpad_arena
.alloc
(
LandingPad
::
gnu
())));
*
self
}
}
...
...
@@ -685,20 +686,19 @@ pub struct DropValue<'tcx> {
impl
<
'tcx
>
DropValue
<
'tcx
>
{
fn
trans
<
'blk
>
(
&
self
,
bcx
:
Block
<
'blk
,
'tcx
>
,
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
debug_loc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
skip_dtor
=
self
.skip_dtor
;
let
_
icx
=
if
skip_dtor
{
base
::
push_ctxt
(
"<DropValue as Cleanup>::trans skip_dtor=true"
)
}
else
{
base
::
push_ctxt
(
"<DropValue as Cleanup>::trans skip_dtor=false"
)
};
let
bcx
=
if
self
.is_immediate
{
if
self
.is_immediate
{
glue
::
drop_ty_immediate
(
bcx
,
self
.val
,
self
.ty
,
debug_loc
,
self
.skip_dtor
)
}
else
{
glue
::
drop_ty_core
(
bcx
,
self
.val
,
self
.ty
,
debug_loc
,
self
.skip_dtor
)
};
bcx
}
}
}
src/librustc_trans/common.rs
浏览文件 @
bf7d4534
...
...
@@ -441,6 +441,7 @@ pub fn eh_unwind_resume(&self) -> Callee<'tcx> {
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
#[must_use]
pub
struct
BlockS
<
'blk
,
'tcx
:
'blk
>
{
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
...
...
@@ -555,6 +556,7 @@ fn drop(&mut self) {
}
}
#[must_use]
pub
struct
BlockAndBuilder
<
'blk
,
'tcx
:
'blk
>
{
bcx
:
Block
<
'blk
,
'tcx
>
,
owned_builder
:
OwnedBuilder
<
'blk
,
'tcx
>
,
...
...
@@ -597,10 +599,24 @@ pub fn at_start<F, R>(&self, f: F) -> R
// Methods delegated to bcx
pub
fn
terminate
(
&
self
)
{
debug!
(
"terminate({})"
,
self
.bcx
.to_str
());
self
.bcx.terminated
.set
(
true
);
}
pub
fn
set_unreachable
(
&
self
)
{
debug!
(
"set_unreachable({})"
,
self
.bcx
.to_str
());
self
.bcx.unreachable
.set
(
true
);
}
pub
fn
is_unreachable
(
&
self
)
->
bool
{
self
.bcx.unreachable
.get
()
}
pub
fn
is_terminated
(
&
self
)
->
bool
{
self
.bcx.terminated
.get
()
}
pub
fn
ccx
(
&
self
)
->
&
'blk
CrateContext
<
'blk
,
'tcx
>
{
self
.bcx
.ccx
()
}
...
...
@@ -700,20 +716,6 @@ fn clone(&self) -> LandingPad {
}
}
pub
struct
Result
<
'blk
,
'tcx
:
'blk
>
{
pub
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
val
:
ValueRef
}
impl
<
'b
,
'tcx
>
Result
<
'b
,
'tcx
>
{
pub
fn
new
(
bcx
:
Block
<
'b
,
'tcx
>
,
val
:
ValueRef
)
->
Result
<
'b
,
'tcx
>
{
Result
{
bcx
:
bcx
,
val
:
val
,
}
}
}
pub
fn
val_ty
(
v
:
ValueRef
)
->
Type
{
unsafe
{
Type
::
from_ref
(
llvm
::
LLVMTypeOf
(
v
))
...
...
@@ -1016,7 +1018,7 @@ pub fn langcall(tcx: TyCtxt,
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)
pub
fn
build_unchecked_lshift
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
build_unchecked_lshift
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
binop_debug_loc
:
DebugLoc
)
->
ValueRef
{
...
...
@@ -1026,7 +1028,7 @@ pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
build
::
Shl
(
bcx
,
lhs
,
rhs
,
binop_debug_loc
)
}
pub
fn
build_unchecked_rshift
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
build_unchecked_rshift
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
lhs_t
:
Ty
<
'tcx
>
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
...
...
@@ -1042,17 +1044,19 @@ pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}
}
fn
shift_mask_rhs
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
shift_mask_rhs
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
let
rhs_llty
=
val_ty
(
rhs
);
build
::
And
(
bcx
,
rhs
,
shift_mask_val
(
bcx
,
rhs_llty
,
rhs_llty
,
false
),
debug_loc
)
}
pub
fn
shift_mask_val
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
llty
:
Type
,
mask_llty
:
Type
,
invert
:
bool
)
->
ValueRef
{
pub
fn
shift_mask_val
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
llty
:
Type
,
mask_llty
:
Type
,
invert
:
bool
)
->
ValueRef
{
let
kind
=
llty
.kind
();
match
kind
{
TypeKind
::
Integer
=>
{
...
...
src/librustc_trans/debuginfo/mod.rs
浏览文件 @
bf7d4534
...
...
@@ -27,7 +27,7 @@
use
rustc
::
ty
::
subst
::
Substs
;
use
abi
::
Abi
;
use
common
::{
CrateContext
,
FunctionContext
,
Block
,
Block
AndBuilder
};
use
common
::{
CrateContext
,
FunctionContext
,
BlockAndBuilder
};
use
monomorphize
::{
self
,
Instance
};
use
rustc
::
ty
::{
self
,
Ty
};
use
rustc
::
mir
;
...
...
@@ -441,7 +441,7 @@ fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>,
}
}
pub
fn
declare_local
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
declare_local
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
variable_name
:
ast
::
Name
,
variable_type
:
Ty
<
'tcx
>
,
scope_metadata
:
DIScope
,
...
...
@@ -494,16 +494,16 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
address_operations
.as_ptr
(),
address_operations
.len
()
as
c_uint
,
debug_loc
,
bcx
.llbb
);
bcx
.llbb
()
);
llvm
::
LLVMSetInstDebugLocation
(
::
build
::
B
(
bcx
)
.llbuilder
,
instr
);
llvm
::
LLVMSetInstDebugLocation
(
bcx
.llbuilder
,
instr
);
}
}
}
match
variable_kind
{
ArgumentVariable
(
_
)
|
CapturedVariable
=>
{
assert
!
(
!
bcx
.fcx
assert
!
(
!
bcx
.fcx
()
.debug_context
.get_ref
(
span
)
.source_locations_enabled
...
...
src/librustc_trans/glue.rs
浏览文件 @
bf7d4534
...
...
@@ -38,38 +38,39 @@
use
arena
::
TypedArena
;
use
syntax_pos
::
DUMMY_SP
;
pub
fn
trans_exchange_free_dyn
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
trans_exchange_free_dyn
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
size
:
ValueRef
,
align
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
_
icx
=
push_ctxt
(
"trans_exchange_free"
);
let
def_id
=
langcall
(
bcx
.tcx
(),
None
,
""
,
ExchangeFreeFnLangItem
);
let
args
=
[
PointerCast
(
bcx
,
v
,
Type
::
i8p
(
bcx
.ccx
())),
size
,
align
];
let
args
=
[
PointerCast
(
&
bcx
,
v
,
Type
::
i8p
(
bcx
.ccx
())),
size
,
align
];
Callee
::
def
(
bcx
.ccx
(),
def_id
,
bcx
.tcx
()
.intern_substs
(
&
[]))
.call
(
bcx
,
debug_loc
,
&
args
,
None
)
.
bcx
.call
(
bcx
,
debug_loc
,
&
args
,
None
)
.
0
}
pub
fn
trans_exchange_free
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
trans_exchange_free
<
'blk
,
'tcx
>
(
cx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
size
:
u64
,
align
:
u32
,
debug_loc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
let
ccx
=
cx
.ccx
();
trans_exchange_free_dyn
(
cx
,
v
,
C_uint
(
c
x
.ccx
()
,
size
),
C_uint
(
c
x
.ccx
()
,
align
),
C_uint
(
c
cx
,
size
),
C_uint
(
c
cx
,
align
),
debug_loc
)
}
pub
fn
trans_exchange_free_ty
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
trans_exchange_free_ty
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
ptr
:
ValueRef
,
content_ty
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
assert
!
(
type_is_sized
(
bcx
.ccx
()
.tcx
(),
content_ty
));
let
sizing_type
=
sizing_type_of
(
bcx
.ccx
(),
content_ty
);
let
content_size
=
llsize_of_alloc
(
bcx
.ccx
(),
sizing_type
);
...
...
@@ -129,23 +130,23 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
pub
fn
drop_ty
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
drop_ty
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
debug_loc
:
DebugLoc
)
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
drop_ty_core
(
bcx
,
v
,
t
,
debug_loc
,
false
)
}
pub
fn
drop_ty_core
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
drop_ty_core
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
,
skip_dtor
:
bool
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
// NB: v is an *alias* of type t here, not a direct value.
debug!
(
"drop_ty_core(t={:?}, skip_dtor={})"
,
t
,
skip_dtor
);
let
_
icx
=
push_ctxt
(
"drop_ty"
);
if
bcx
.fcx
.type_needs_drop
(
t
)
{
if
bcx
.fcx
()
.type_needs_drop
(
t
)
{
let
ccx
=
bcx
.ccx
();
let
g
=
if
skip_dtor
{
DropGlueKind
::
TyContents
(
t
)
...
...
@@ -155,29 +156,29 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let
glue
=
get_drop_glue_core
(
ccx
,
g
);
let
glue_type
=
get_drop_glue_type
(
ccx
.tcx
(),
t
);
let
ptr
=
if
glue_type
!=
t
{
PointerCast
(
bcx
,
v
,
type_of
(
ccx
,
glue_type
)
.ptr_to
())
PointerCast
(
&
bcx
,
v
,
type_of
(
ccx
,
glue_type
)
.ptr_to
())
}
else
{
v
};
// No drop-hint ==> call standard drop glue
Call
(
bcx
,
glue
,
&
[
ptr
],
debug_loc
);
Call
(
&
bcx
,
glue
,
&
[
ptr
],
debug_loc
);
}
bcx
}
pub
fn
drop_ty_immediate
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
drop_ty_immediate
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
,
skip_dtor
:
bool
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
_
icx
=
push_ctxt
(
"drop_ty_immediate"
);
let
vp
=
alloc_ty
(
bcx
,
t
,
""
);
call_lifetime_start
(
bcx
,
vp
);
store_ty
(
bcx
,
v
,
vp
,
t
);
let
vp
=
alloc_ty
(
&
bcx
,
t
,
""
);
call_lifetime_start
(
&
bcx
,
vp
);
store_ty
(
&
bcx
,
v
,
vp
,
t
);
let
bcx
=
drop_ty_core
(
bcx
,
vp
,
t
,
debug_loc
,
skip_dtor
);
call_lifetime_end
(
bcx
,
vp
);
call_lifetime_end
(
&
bcx
,
vp
);
bcx
}
...
...
@@ -248,14 +249,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// type, so we don't need to explicitly cast the function parameter.
let
bcx
=
make_drop_glue
(
bcx
,
get_param
(
llfn
,
0
),
g
);
fcx
.finish
(
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
}
fn
trans_custom_dtor
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
trans_custom_dtor
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
v0
:
ValueRef
,
shallow_drop
:
bool
)
->
Block
<
'blk
,
'tcx
>
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
debug!
(
"trans_custom_dtor t: {}"
,
t
);
let
tcx
=
bcx
.tcx
();
...
...
@@ -269,12 +270,12 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
//
// FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code.
let
contents_scope
=
bcx
.fcx
.push_custom_cleanup_scope
();
let
contents_scope
=
bcx
.fcx
()
.push_custom_cleanup_scope
();
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
if
!
shallow_drop
{
bcx
.fcx
.schedule_drop_adt_contents
(
contents_scope
,
v0
,
t
);
bcx
.fcx
()
.schedule_drop_adt_contents
(
contents_scope
,
v0
,
t
);
}
let
(
sized_args
,
unsized_args
);
...
...
@@ -284,8 +285,8 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}
else
{
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
unsized_args
=
[
Load
(
bcx
,
get_dataptr
(
bcx
,
v0
)),
Load
(
bcx
,
get_meta
(
bcx
,
v0
))
Load
(
&
bcx
,
get_dataptr
(
&
bcx
,
v0
)),
Load
(
&
bcx
,
get_meta
(
&
bcx
,
v0
))
];
&
unsized_args
};
...
...
@@ -300,9 +301,9 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
};
let
dtor_did
=
def
.destructor
()
.unwrap
();
bcx
=
Callee
::
def
(
bcx
.ccx
(),
dtor_did
,
vtbl
.substs
)
.call
(
bcx
,
DebugLoc
::
None
,
args
,
None
)
.
bcx
;
.call
(
bcx
,
DebugLoc
::
None
,
args
,
None
)
.
0
;
bcx
.fcx
.pop_and_trans_custom_cleanup_scope
(
bcx
,
contents_scope
)
bcx
.fcx
()
.pop_and_trans_custom_cleanup_scope
(
bcx
,
contents_scope
)
}
pub
fn
size_and_align_of_dst
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
...
...
@@ -416,10 +417,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
}
}
fn
make_drop_glue
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
make_drop_glue
<
'blk
,
'tcx
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
v0
:
ValueRef
,
g
:
DropGlueKind
<
'tcx
>
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
t
=
g
.ty
();
let
skip_dtor
=
match
g
{
DropGlueKind
::
Ty
(
_
)
=>
false
,
DropGlueKind
::
TyContents
(
_
)
=>
true
};
...
...
@@ -438,27 +439,28 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// a safe-guard, assert TyBox not used with TyContents.
assert
!
(
!
skip_dtor
);
if
!
type_is_sized
(
bcx
.tcx
(),
content_ty
)
{
let
llval
=
get_dataptr
(
bcx
,
v0
);
let
llbox
=
Load
(
bcx
,
llval
);
let
llval
=
get_dataptr
(
&
bcx
,
v0
);
let
llbox
=
Load
(
&
bcx
,
llval
);
let
bcx
=
drop_ty
(
bcx
,
v0
,
content_ty
,
DebugLoc
::
None
);
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
let
info
=
get_meta
(
bcx
,
v0
);
let
info
=
Load
(
bcx
,
info
);
let
(
llsize
,
llalign
)
=
size_and_align_of_dst
(
&
bcx
.build
(),
content_ty
,
info
);
let
info
=
get_meta
(
&
bcx
,
v0
);
let
info
=
Load
(
&
bcx
,
info
);
let
(
llsize
,
llalign
)
=
size_and_align_of_dst
(
&
bcx
,
content_ty
,
info
);
// `Box<ZeroSizeType>` does not allocate.
let
needs_free
=
ICmp
(
bcx
,
llvm
::
IntNE
,
llsize
,
C_uint
(
bcx
.ccx
(),
0u64
),
DebugLoc
::
None
);
let
needs_free
=
ICmp
(
&
bcx
,
llvm
::
IntNE
,
llsize
,
C_uint
(
bcx
.ccx
(),
0u64
),
DebugLoc
::
None
);
with_cond
(
bcx
,
needs_free
,
|
bcx
|
{
trans_exchange_free_dyn
(
bcx
,
llbox
,
llsize
,
llalign
,
DebugLoc
::
None
)
})
}
else
{
let
llval
=
v0
;
let
llbox
=
Load
(
bcx
,
llval
);
let
llbox
=
Load
(
&
bcx
,
llval
);
let
bcx
=
drop_ty
(
bcx
,
llbox
,
content_ty
,
DebugLoc
::
None
);
trans_exchange_free_ty
(
bcx
,
llbox
,
content_ty
,
DebugLoc
::
None
)
}
...
...
@@ -469,12 +471,12 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// okay with always calling the Drop impl, if any.
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
assert
!
(
!
skip_dtor
);
let
data_ptr
=
get_dataptr
(
bcx
,
v0
);
let
vtable_ptr
=
Load
(
bcx
,
get_meta
(
bcx
,
v0
));
let
dtor
=
Load
(
bcx
,
vtable_ptr
);
Call
(
bcx
,
let
data_ptr
=
get_dataptr
(
&
bcx
,
v0
);
let
vtable_ptr
=
Load
(
&
bcx
,
get_meta
(
&
bcx
,
v0
));
let
dtor
=
Load
(
&
bcx
,
vtable_ptr
);
Call
(
&
bcx
,
dtor
,
&
[
PointerCast
(
bcx
,
Load
(
bcx
,
data_ptr
),
Type
::
i8p
(
bcx
.ccx
()))],
&
[
PointerCast
(
&
bcx
,
Load
(
&
bcx
,
data_ptr
),
Type
::
i8p
(
bcx
.ccx
()))],
DebugLoc
::
None
);
bcx
}
...
...
@@ -485,7 +487,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
bcx
}
_
=>
{
if
bcx
.fcx
.type_needs_drop
(
t
)
{
if
bcx
.fcx
()
.type_needs_drop
(
t
)
{
drop_structural_ty
(
bcx
,
v0
,
t
)
}
else
{
bcx
...
...
@@ -495,27 +497,26 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}
// Iterates through the elements of a structural type, dropping them.
fn
drop_structural_ty
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
fn
drop_structural_ty
<
'blk
,
'tcx
>
(
cx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
av
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
_
icx
=
push_ctxt
(
"drop_structural_ty"
);
fn
iter_variant
<
'blk
,
'tcx
>
(
cx
:
Block
<
'blk
,
'tcx
>
,
fn
iter_variant
<
'blk
,
'tcx
>
(
cx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
t
:
Ty
<
'tcx
>
,
av
:
adt
::
MaybeSizedValue
,
variant
:
&
'tcx
ty
::
VariantDef
,
substs
:
&
Substs
<
'tcx
>
)
->
Block
<
'blk
,
'tcx
>
{
->
Block
AndBuilder
<
'blk
,
'tcx
>
{
let
_
icx
=
push_ctxt
(
"iter_variant"
);
let
tcx
=
cx
.tcx
();
let
mut
cx
=
cx
;
for
(
i
,
field
)
in
variant
.fields
.iter
()
.enumerate
()
{
let
arg
=
monomorphize
::
field_ty
(
tcx
,
substs
,
field
);
cx
=
drop_ty
(
cx
,
adt
::
trans_field_ptr
(
cx
,
t
,
av
,
Disr
::
from
(
variant
.disr_val
),
i
),
arg
,
DebugLoc
::
None
);
let
field_ptr
=
adt
::
trans_field_ptr
(
&
cx
,
t
,
av
,
Disr
::
from
(
variant
.disr_val
),
i
);
cx
=
drop_ty
(
cx
,
field_ptr
,
arg
,
DebugLoc
::
None
);
}
return
cx
;
}
...
...
@@ -524,8 +525,8 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
adt
::
MaybeSizedValue
::
sized
(
av
)
}
else
{
// FIXME(#36457) -- we should pass unsized values as two arguments
let
data
=
Load
(
cx
,
get_dataptr
(
cx
,
av
));
let
info
=
Load
(
cx
,
get_meta
(
cx
,
av
));
let
data
=
Load
(
&
cx
,
get_dataptr
(
&
cx
,
av
));
let
info
=
Load
(
&
cx
,
get_meta
(
&
cx
,
av
));
adt
::
MaybeSizedValue
::
unsized_
(
data
,
info
)
};
...
...
@@ -533,12 +534,12 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
match
t
.sty
{
ty
::
TyClosure
(
def_id
,
substs
)
=>
{
for
(
i
,
upvar_ty
)
in
substs
.upvar_tys
(
def_id
,
cx
.tcx
())
.enumerate
()
{
let
llupvar
=
adt
::
trans_field_ptr
(
cx
,
t
,
value
,
Disr
(
0
),
i
);
let
llupvar
=
adt
::
trans_field_ptr
(
&
cx
,
t
,
value
,
Disr
(
0
),
i
);
cx
=
drop_ty
(
cx
,
llupvar
,
upvar_ty
,
DebugLoc
::
None
);
}
}
ty
::
TyArray
(
_
,
n
)
=>
{
let
base
=
get_dataptr
(
cx
,
value
.value
);
let
base
=
get_dataptr
(
&
cx
,
value
.value
);
let
len
=
C_uint
(
cx
.ccx
(),
n
);
let
unit_ty
=
t
.sequence_element_type
(
cx
.tcx
());
cx
=
tvec
::
slice_for_each
(
cx
,
base
,
unit_ty
,
len
,
...
...
@@ -551,7 +552,7 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
}
ty
::
TyTuple
(
ref
args
)
=>
{
for
(
i
,
arg
)
in
args
.iter
()
.enumerate
()
{
let
llfld_a
=
adt
::
trans_field_ptr
(
cx
,
t
,
value
,
Disr
(
0
),
i
);
let
llfld_a
=
adt
::
trans_field_ptr
(
&
cx
,
t
,
value
,
Disr
(
0
),
i
);
cx
=
drop_ty
(
cx
,
llfld_a
,
*
arg
,
DebugLoc
::
None
);
}
}
...
...
@@ -559,15 +560,15 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
AdtKind
::
Struct
=>
{
let
VariantInfo
{
fields
,
discr
}
=
VariantInfo
::
from_ty
(
cx
.tcx
(),
t
,
None
);
for
(
i
,
&
Field
(
_
,
field_ty
))
in
fields
.iter
()
.enumerate
()
{
let
llfld_a
=
adt
::
trans_field_ptr
(
cx
,
t
,
value
,
Disr
::
from
(
discr
),
i
);
let
llfld_a
=
adt
::
trans_field_ptr
(
&
cx
,
t
,
value
,
Disr
::
from
(
discr
),
i
);
let
val
=
if
type_is_sized
(
cx
.tcx
(),
field_ty
)
{
llfld_a
}
else
{
// FIXME(#36457) -- we should pass unsized values as two arguments
let
scratch
=
alloc_ty
(
cx
,
field_ty
,
"__fat_ptr_iter"
);
Store
(
cx
,
llfld_a
,
get_dataptr
(
cx
,
scratch
));
Store
(
cx
,
value
.meta
,
get_meta
(
cx
,
scratch
));
let
scratch
=
alloc_ty
(
&
cx
,
field_ty
,
"__fat_ptr_iter"
);
Store
(
&
cx
,
llfld_a
,
get_dataptr
(
&
cx
,
scratch
));
Store
(
&
cx
,
value
.meta
,
get_meta
(
&
cx
,
scratch
));
scratch
};
cx
=
drop_ty
(
cx
,
val
,
field_ty
,
DebugLoc
::
None
);
...
...
@@ -577,14 +578,14 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
bug!
(
"Union in `glue::drop_structural_ty`"
);
}
AdtKind
::
Enum
=>
{
let
fcx
=
cx
.fcx
;
let
fcx
=
cx
.fcx
()
;
let
ccx
=
fcx
.ccx
;
let
n_variants
=
adt
.variants
.len
();
// NB: we must hit the discriminant first so that structural
// comparison know not to proceed when the discriminants differ.
match
adt
::
trans_switch
(
cx
,
t
,
av
,
false
)
{
match
adt
::
trans_switch
(
&
cx
,
t
,
av
,
false
)
{
(
adt
::
BranchKind
::
Single
,
None
)
=>
{
if
n_variants
!=
0
{
assert
!
(
n_variants
==
1
);
...
...
@@ -593,7 +594,8 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
}
}
(
adt
::
BranchKind
::
Switch
,
Some
(
lldiscrim_a
))
=>
{
cx
=
drop_ty
(
cx
,
lldiscrim_a
,
cx
.tcx
()
.types.isize
,
DebugLoc
::
None
);
let
tcx
=
cx
.tcx
();
cx
=
drop_ty
(
cx
,
lldiscrim_a
,
tcx
.types.isize
,
DebugLoc
::
None
);
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
...
...
@@ -608,23 +610,19 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
// from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let
ret_void_cx
=
fcx
.new_block
(
"enum-iter-ret-void"
);
RetVoid
(
ret_void_cx
,
DebugLoc
::
None
);
let
llswitch
=
Switch
(
cx
,
lldiscrim_a
,
ret_void_cx
.llbb
,
n_variants
);
let
next_cx
=
fcx
.new_block
(
"enum-iter-next"
);
let
ret_void_cx
=
fcx
.new_block
(
"enum-iter-ret-void"
)
.build
()
;
RetVoid
(
&
ret_void_cx
,
DebugLoc
::
None
);
let
llswitch
=
Switch
(
&
cx
,
lldiscrim_a
,
ret_void_cx
.llbb
()
,
n_variants
);
let
next_cx
=
fcx
.new_block
(
"enum-iter-next"
)
.build
()
;
for
variant
in
&
adt
.variants
{
let
variant_cx
=
fcx
.new_block
(
&
format!
(
"enum-iter-variant-{}"
,
&
variant
.disr_val
.to_string
()));
let
case_val
=
adt
::
trans_case
(
cx
,
t
,
Disr
::
from
(
variant
.disr_val
));
AddCase
(
llswitch
,
case_val
,
variant_cx
.llbb
);
let
variant_cx
=
iter_variant
(
variant_cx
,
t
,
value
,
variant
,
substs
);
Br
(
variant_cx
,
next_cx
.llbb
,
DebugLoc
::
None
);
let
variant_cx_name
=
format!
(
"enum-iter-variant-{}"
,
&
variant
.disr_val
.to_string
());
let
variant_cx
=
fcx
.new_block
(
&
variant_cx_name
)
.build
();
let
case_val
=
adt
::
trans_case
(
&
cx
,
t
,
Disr
::
from
(
variant
.disr_val
));
AddCase
(
llswitch
,
case_val
,
variant_cx
.llbb
());
let
variant_cx
=
iter_variant
(
variant_cx
,
t
,
value
,
variant
,
substs
);
Br
(
&
variant_cx
,
next_cx
.llbb
(),
DebugLoc
::
None
);
}
cx
=
next_cx
;
}
...
...
src/librustc_trans/intrinsic.rs
浏览文件 @
bf7d4534
...
...
@@ -87,14 +87,13 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub
fn
trans_intrinsic_call
<
'a
,
'blk
,
'tcx
>
(
mut
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
trans_intrinsic_call
<
'a
,
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
callee_ty
:
Ty
<
'tcx
>
,
fn_ty
:
&
FnType
,
llargs
:
&
[
ValueRef
],
llresult
:
ValueRef
,
call_debug_location
:
DebugLoc
)
->
Result
<
'blk
,
'tcx
>
{
let
fcx
=
bcx
.fcx
;
call_debug_location
:
DebugLoc
)
{
let
fcx
=
bcx
.fcx
();
let
ccx
=
fcx
.ccx
;
let
tcx
=
bcx
.tcx
();
...
...
@@ -122,11 +121,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
if
name
==
"abort"
{
let
llfn
=
ccx
.get_intrinsic
(
&
(
"llvm.trap"
));
Call
(
bcx
,
llfn
,
&
[],
call_debug_location
);
Unreachable
(
bcx
);
return
Result
::
new
(
bcx
,
C_undef
(
Type
::
nil
(
ccx
)
.ptr_to
()));
return
;
}
else
if
name
==
"unreachable"
{
Unreachable
(
bcx
);
return
Result
::
new
(
bcx
,
C_nil
(
ccx
))
;
// FIXME: do nothing?
return
;
}
let
llret_ty
=
type_of
::
type_of
(
ccx
,
ret_ty
);
...
...
@@ -145,8 +143,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
Call
(
bcx
,
expect
,
&
[
llargs
[
0
],
C_bool
(
ccx
,
false
)],
call_debug_location
)
}
(
_
,
"try"
)
=>
{
bcx
=
try_intrinsic
(
bcx
,
llargs
[
0
],
llargs
[
1
],
llargs
[
2
],
llresult
,
call_debug_location
);
try_intrinsic
(
bcx
,
llargs
[
0
],
llargs
[
1
],
llargs
[
2
],
llresult
,
call_debug_location
);
C_nil
(
ccx
)
}
(
_
,
"breakpoint"
)
=>
{
...
...
@@ -162,7 +159,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let
tp_ty
=
substs
.type_at
(
0
);
if
!
type_is_sized
(
tcx
,
tp_ty
)
{
let
(
llsize
,
_
)
=
glue
::
size_and_align_of_dst
(
&
bcx
.build
()
,
tp_ty
,
llargs
[
1
]);
glue
::
size_and_align_of_dst
(
bcx
,
tp_ty
,
llargs
[
1
]);
llsize
}
else
{
let
lltp_ty
=
type_of
::
type_of
(
ccx
,
tp_ty
);
...
...
@@ -177,7 +174,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let
tp_ty
=
substs
.type_at
(
0
);
if
!
type_is_sized
(
tcx
,
tp_ty
)
{
let
(
_
,
llalign
)
=
glue
::
size_and_align_of_dst
(
&
bcx
.build
()
,
tp_ty
,
llargs
[
1
]);
glue
::
size_and_align_of_dst
(
bcx
,
tp_ty
,
llargs
[
1
]);
llalign
}
else
{
C_uint
(
ccx
,
type_of
::
align_of
(
ccx
,
tp_ty
))
...
...
@@ -188,25 +185,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let
lltp_ty
=
type_of
::
type_of
(
ccx
,
tp_ty
);
C_uint
(
ccx
,
machine
::
llalign_of_pref
(
ccx
,
lltp_ty
))
}
(
_
,
"drop_in_place"
)
=>
{
let
tp_ty
=
substs
.type_at
(
0
);
let
is_sized
=
type_is_sized
(
tcx
,
tp_ty
);
let
ptr
=
if
is_sized
{
llargs
[
0
]
}
else
{
// FIXME(#36457) -- we should pass unsized values as two arguments
let
scratch
=
alloc_ty
(
bcx
,
tp_ty
,
"drop"
);
call_lifetime_start
(
bcx
,
scratch
);
Store
(
bcx
,
llargs
[
0
],
get_dataptr
(
bcx
,
scratch
));
Store
(
bcx
,
llargs
[
1
],
get_meta
(
bcx
,
scratch
));
scratch
};
glue
::
drop_ty
(
bcx
,
ptr
,
tp_ty
,
call_debug_location
);
if
!
is_sized
{
call_lifetime_end
(
bcx
,
ptr
);
}
C_nil
(
ccx
)
}
(
_
,
"type_name"
)
=>
{
let
tp_ty
=
substs
.type_at
(
0
);
let
ty_name
=
Symbol
::
intern
(
&
tp_ty
.to_string
())
.as_str
();
...
...
@@ -230,7 +208,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(
_
,
"needs_drop"
)
=>
{
let
tp_ty
=
substs
.type_at
(
0
);
C_bool
(
ccx
,
bcx
.fcx
.type_needs_drop
(
tp_ty
))
C_bool
(
ccx
,
bcx
.fcx
()
.type_needs_drop
(
tp_ty
))
}
(
_
,
"offset"
)
=>
{
let
ptr
=
llargs
[
0
];
...
...
@@ -613,7 +591,7 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
// qux` to be converted into `foo, bar, baz, qux`, integer
// arguments to be truncated as needed and pointers to be
// cast.
fn
modify_as_needed
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
modify_as_needed
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
t
:
&
intrinsics
::
Type
,
arg_type
:
Ty
<
'tcx
>
,
llarg
:
ValueRef
)
...
...
@@ -627,7 +605,7 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// This assumes the type is "simple", i.e. no
// destructors, and the contents are SIMD
// etc.
assert
!
(
!
bcx
.fcx
.type_needs_drop
(
arg_type
));
assert
!
(
!
bcx
.fcx
()
.type_needs_drop
(
arg_type
));
let
arg
=
adt
::
MaybeSizedValue
::
sized
(
llarg
);
(
0
..
contents
.len
())
.map
(|
i
|
{
...
...
@@ -718,11 +696,9 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
store_ty
(
bcx
,
llval
,
llresult
,
ret_ty
);
}
}
Result
::
new
(
bcx
,
llresult
)
}
fn
copy_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
copy_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
allow_overlap
:
bool
,
volatile
:
bool
,
tp_ty
:
Ty
<
'tcx
>
,
...
...
@@ -759,7 +735,7 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
call_debug_location
)
}
fn
memset_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
memset_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
volatile
:
bool
,
tp_ty
:
Ty
<
'tcx
>
,
dst
:
ValueRef
,
...
...
@@ -788,7 +764,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
call_debug_location
)
}
fn
count_zeros_intrinsic
(
bcx
:
Block
,
fn
count_zeros_intrinsic
(
bcx
:
&
BlockAndBuilder
,
name
:
&
str
,
val
:
ValueRef
,
call_debug_location
:
DebugLoc
)
...
...
@@ -798,7 +774,7 @@ fn count_zeros_intrinsic(bcx: Block,
Call
(
bcx
,
llfn
,
&
[
val
,
y
],
call_debug_location
)
}
fn
with_overflow_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
with_overflow_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
name
:
&
str
,
a
:
ValueRef
,
b
:
ValueRef
,
...
...
@@ -817,20 +793,21 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
C_nil
(
bcx
.ccx
())
}
fn
try_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
func
:
ValueRef
,
data
:
ValueRef
,
local_ptr
:
ValueRef
,
dest
:
ValueRef
,
dloc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
fn
try_intrinsic
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
func
:
ValueRef
,
data
:
ValueRef
,
local_ptr
:
ValueRef
,
dest
:
ValueRef
,
dloc
:
DebugLoc
)
{
if
bcx
.sess
()
.no_landing_pads
()
{
Call
(
bcx
,
func
,
&
[
data
],
dloc
);
Store
(
bcx
,
C_null
(
Type
::
i8p
(
bcx
.ccx
())),
dest
);
bcx
Store
(
bcx
,
C_null
(
Type
::
i8p
(
&
bcx
.ccx
())),
dest
);
}
else
if
wants_msvc_seh
(
bcx
.sess
())
{
trans_msvc_try
(
bcx
,
func
,
data
,
local_ptr
,
dest
,
dloc
)
trans_msvc_try
(
bcx
,
func
,
data
,
local_ptr
,
dest
,
dloc
)
;
}
else
{
trans_gnu_try
(
bcx
,
func
,
data
,
local_ptr
,
dest
,
dloc
)
trans_gnu_try
(
bcx
,
func
,
data
,
local_ptr
,
dest
,
dloc
)
;
}
}
...
...
@@ -841,26 +818,26 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// instructions are meant to work for all targets, as of the time of this
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
fn
trans_msvc_try
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
trans_msvc_try
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
func
:
ValueRef
,
data
:
ValueRef
,
local_ptr
:
ValueRef
,
dest
:
ValueRef
,
dloc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
let
llfn
=
get_rust_try_fn
(
bcx
.fcx
,
&
mut
|
bcx
|
{
dloc
:
DebugLoc
)
{
let
llfn
=
get_rust_try_fn
(
bcx
.fcx
()
,
&
mut
|
bcx
|
{
let
ccx
=
bcx
.ccx
();
let
dloc
=
DebugLoc
::
None
;
SetPersonalityFn
(
bcx
,
bcx
.fcx
.eh_personality
());
SetPersonalityFn
(
&
bcx
,
bcx
.fcx
()
.eh_personality
());
let
normal
=
bcx
.fcx
.new_block
(
"normal"
);
let
catchswitch
=
bcx
.fcx
.new_block
(
"catchswitch"
);
let
catchpad
=
bcx
.fcx
.new_block
(
"catchpad"
);
let
caught
=
bcx
.fcx
.new_block
(
"caught"
);
let
normal
=
bcx
.fcx
()
.new_block
(
"normal"
)
.build
(
);
let
catchswitch
=
bcx
.fcx
()
.new_block
(
"catchswitch"
)
.build
(
);
let
catchpad
=
bcx
.fcx
()
.new_block
(
"catchpad"
)
.build
(
);
let
caught
=
bcx
.fcx
()
.new_block
(
"caught"
)
.build
(
);
let
func
=
llvm
::
get_param
(
bcx
.fcx.llfn
,
0
);
let
data
=
llvm
::
get_param
(
bcx
.fcx.llfn
,
1
);
let
local_ptr
=
llvm
::
get_param
(
bcx
.fcx.llfn
,
2
);
let
func
=
llvm
::
get_param
(
bcx
.fcx
()
.llfn
,
0
);
let
data
=
llvm
::
get_param
(
bcx
.fcx
()
.llfn
,
1
);
let
local_ptr
=
llvm
::
get_param
(
bcx
.fcx
()
.llfn
,
2
);
// We're generating an IR snippet that looks like:
//
...
...
@@ -902,37 +879,36 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
//
// More information can be found in libstd's seh.rs implementation.
let
i64p
=
Type
::
i64
(
ccx
)
.ptr_to
();
let
slot
=
Alloca
(
bcx
,
i64p
,
"slot"
);
Invoke
(
bcx
,
func
,
&
[
data
],
normal
.llbb
,
catchswitch
.llbb
,
dloc
);
let
slot
=
Alloca
(
&
bcx
,
i64p
,
"slot"
);
Invoke
(
&
bcx
,
func
,
&
[
data
],
normal
.llbb
(),
catchswitch
.llbb
()
,
dloc
);
Ret
(
normal
,
C_i32
(
ccx
,
0
),
dloc
);
Ret
(
&
normal
,
C_i32
(
ccx
,
0
),
dloc
);
let
cs
=
CatchSwitch
(
catchswitch
,
None
,
None
,
1
);
AddHandler
(
catchswitch
,
cs
,
catchpad
.llbb
);
let
cs
=
CatchSwitch
(
&
catchswitch
,
None
,
None
,
1
);
AddHandler
(
&
catchswitch
,
cs
,
catchpad
.llbb
()
);
let
tcx
=
ccx
.tcx
();
let
tydesc
=
match
tcx
.lang_items
.msvc_try_filter
()
{
Some
(
did
)
=>
::
consts
::
get_static
(
ccx
,
did
),
None
=>
bug!
(
"msvc_try_filter not defined"
),
};
let
tok
=
CatchPad
(
catchpad
,
cs
,
&
[
tydesc
,
C_i32
(
ccx
,
0
),
slot
]);
let
addr
=
Load
(
catchpad
,
slot
);
let
arg1
=
Load
(
catchpad
,
addr
);
let
tok
=
CatchPad
(
&
catchpad
,
cs
,
&
[
tydesc
,
C_i32
(
ccx
,
0
),
slot
]);
let
addr
=
Load
(
&
catchpad
,
slot
);
let
arg1
=
Load
(
&
catchpad
,
addr
);
let
val1
=
C_i32
(
ccx
,
1
);
let
arg2
=
Load
(
catchpad
,
InBoundsGEP
(
catchpad
,
addr
,
&
[
val1
]));
let
local_ptr
=
BitCast
(
catchpad
,
local_ptr
,
i64p
);
Store
(
catchpad
,
arg1
,
local_ptr
);
Store
(
catchpad
,
arg2
,
InBoundsGEP
(
catchpad
,
local_ptr
,
&
[
val1
]));
CatchRet
(
catchpad
,
tok
,
caught
.llbb
);
let
arg2
=
Load
(
&
catchpad
,
InBoundsGEP
(
&
catchpad
,
addr
,
&
[
val1
]));
let
local_ptr
=
BitCast
(
&
catchpad
,
local_ptr
,
i64p
);
Store
(
&
catchpad
,
arg1
,
local_ptr
);
Store
(
&
catchpad
,
arg2
,
InBoundsGEP
(
&
catchpad
,
local_ptr
,
&
[
val1
]));
CatchRet
(
&
catchpad
,
tok
,
caught
.llbb
()
);
Ret
(
caught
,
C_i32
(
ccx
,
1
),
dloc
);
Ret
(
&
caught
,
C_i32
(
ccx
,
1
),
dloc
);
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let
ret
=
Call
(
bcx
,
llfn
,
&
[
func
,
data
,
local_ptr
],
dloc
);
Store
(
bcx
,
ret
,
dest
);
return
bcx
}
// Definition of the standard "try" function for Rust using the GNU-like model
...
...
@@ -946,13 +922,13 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn
trans_gnu_try
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
fn
trans_gnu_try
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
func
:
ValueRef
,
data
:
ValueRef
,
local_ptr
:
ValueRef
,
dest
:
ValueRef
,
dloc
:
DebugLoc
)
->
Block
<
'blk
,
'tcx
>
{
let
llfn
=
get_rust_try_fn
(
bcx
.fcx
,
&
mut
|
bcx
|
{
dloc
:
DebugLoc
)
{
let
llfn
=
get_rust_try_fn
(
bcx
.fcx
()
,
&
mut
|
bcx
|
{
let
ccx
=
bcx
.ccx
();
let
dloc
=
DebugLoc
::
None
;
...
...
@@ -973,14 +949,14 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
let
then
=
bcx
.fcx
.new_block
(
"then"
);
let
catch
=
bcx
.fcx
.new_block
(
"catch"
);
let
then
=
bcx
.fcx
()
.new_block
(
"then"
)
.build
(
);
let
catch
=
bcx
.fcx
()
.new_block
(
"catch"
)
.build
(
);
let
func
=
llvm
::
get_param
(
bcx
.fcx.llfn
,
0
);
let
data
=
llvm
::
get_param
(
bcx
.fcx.llfn
,
1
);
let
local_ptr
=
llvm
::
get_param
(
bcx
.fcx.llfn
,
2
);
Invoke
(
bcx
,
func
,
&
[
data
],
then
.llbb
,
catch
.llbb
,
dloc
);
Ret
(
then
,
C_i32
(
ccx
,
0
),
dloc
);
let
func
=
llvm
::
get_param
(
bcx
.fcx
()
.llfn
,
0
);
let
data
=
llvm
::
get_param
(
bcx
.fcx
()
.llfn
,
1
);
let
local_ptr
=
llvm
::
get_param
(
bcx
.fcx
()
.llfn
,
2
);
Invoke
(
&
bcx
,
func
,
&
[
data
],
then
.llbb
(),
catch
.llbb
()
,
dloc
);
Ret
(
&
then
,
C_i32
(
ccx
,
0
),
dloc
);
// Type indicator for the exception being thrown.
//
...
...
@@ -990,18 +966,17 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// rust_try ignores the selector.
let
lpad_ty
=
Type
::
struct_
(
ccx
,
&
[
Type
::
i8p
(
ccx
),
Type
::
i32
(
ccx
)],
false
);
let
vals
=
LandingPad
(
catch
,
lpad_ty
,
bcx
.fcx
.eh_personality
(),
1
);
AddClause
(
catch
,
vals
,
C_null
(
Type
::
i8p
(
ccx
)));
let
ptr
=
ExtractValue
(
catch
,
vals
,
0
);
Store
(
catch
,
ptr
,
BitCast
(
catch
,
local_ptr
,
Type
::
i8p
(
ccx
)
.ptr_to
()));
Ret
(
catch
,
C_i32
(
ccx
,
1
),
dloc
);
let
vals
=
LandingPad
(
&
catch
,
lpad_ty
,
bcx
.fcx
()
.eh_personality
(),
1
);
AddClause
(
&
catch
,
vals
,
C_null
(
Type
::
i8p
(
ccx
)));
let
ptr
=
ExtractValue
(
&
catch
,
vals
,
0
);
Store
(
&
catch
,
ptr
,
BitCast
(
&
catch
,
local_ptr
,
Type
::
i8p
(
ccx
)
.ptr_to
()));
Ret
(
&
catch
,
C_i32
(
ccx
,
1
),
dloc
);
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let
ret
=
Call
(
bcx
,
llfn
,
&
[
func
,
data
,
local_ptr
],
dloc
);
Store
(
bcx
,
ret
,
dest
);
return
bcx
;
}
// Helper function to give a Block to a closure to translate a shim function.
...
...
@@ -1010,7 +985,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
name
:
&
str
,
inputs
:
Vec
<
Ty
<
'tcx
>>
,
output
:
Ty
<
'tcx
>
,
trans
:
&
mut
for
<
'b
>
FnMut
(
Block
<
'b
,
'tcx
>
))
trans
:
&
mut
for
<
'b
>
FnMut
(
Block
AndBuilder
<
'b
,
'tcx
>
))
->
ValueRef
{
let
ccx
=
fcx
.ccx
;
let
sig
=
ccx
.tcx
()
.mk_fn_sig
(
inputs
.into_iter
(),
output
,
false
);
...
...
@@ -1035,7 +1010,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
//
// This function is only generated once and is then cached.
fn
get_rust_try_fn
<
'a
,
'tcx
>
(
fcx
:
&
FunctionContext
<
'a
,
'tcx
>
,
trans
:
&
mut
for
<
'b
>
FnMut
(
Block
<
'b
,
'tcx
>
))
trans
:
&
mut
for
<
'b
>
FnMut
(
Block
AndBuilder
<
'b
,
'tcx
>
))
->
ValueRef
{
let
ccx
=
fcx
.ccx
;
if
let
Some
(
llfn
)
=
ccx
.rust_try_fn
()
.get
()
{
...
...
@@ -1060,16 +1035,16 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
span_err!
(
a
,
b
,
E0511
,
"{}"
,
c
);
}
fn
generic_simd_intrinsic
<
'blk
,
'tcx
,
'a
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
name
:
&
str
,
callee_ty
:
Ty
<
'tcx
>
,
llargs
:
&
[
ValueRef
],
ret_ty
:
Ty
<
'tcx
>
,
llret_ty
:
Type
,
call_debug_location
:
DebugLoc
,
span
:
Span
)
->
ValueRef
{
fn
generic_simd_intrinsic
<
'blk
,
'tcx
,
'a
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
name
:
&
str
,
callee_ty
:
Ty
<
'tcx
>
,
llargs
:
&
[
ValueRef
],
ret_ty
:
Ty
<
'tcx
>
,
llret_ty
:
Type
,
call_debug_location
:
DebugLoc
,
span
:
Span
)
->
ValueRef
{
// macros for error handling:
macro_rules!
emit_error
{
(
$msg
:
tt
)
=>
{
...
...
src/librustc_trans/meth.rs
浏览文件 @
bf7d4534
...
...
@@ -32,7 +32,7 @@
const
VTABLE_OFFSET
:
usize
=
3
;
/// Extracts a method from a trait object's vtable, at the specified index.
pub
fn
get_virtual_method
<
'blk
,
'tcx
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
get_virtual_method
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
llvtable
:
ValueRef
,
vtable_index
:
usize
)
->
ValueRef
{
...
...
@@ -94,9 +94,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
let
dest
=
fcx
.llretslotptr
.get
();
let
llargs
=
get_params
(
fcx
.llfn
);
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[
fcx
.fn_ty.ret
.is_indirect
()
as
usize
..
],
dest
)
.
bcx
;
&
llargs
[
fcx
.fn_ty.ret
.is_indirect
()
as
usize
..
],
dest
)
.
0
;
fcx
.finish
(
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
llfn
}
...
...
src/librustc_trans/mir/block.rs
浏览文件 @
bf7d4534
...
...
@@ -40,6 +40,7 @@
use
super
::
operand
::
OperandValue
::{
Pair
,
Ref
,
Immediate
};
use
std
::
cell
::
Ref
as
CellRef
;
use
std
::
ptr
;
impl
<
'bcx
,
'tcx
>
MirContext
<
'bcx
,
'tcx
>
{
pub
fn
trans_block
(
&
mut
self
,
bb
:
mir
::
BasicBlock
)
{
...
...
@@ -121,10 +122,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
let
ps
=
self
.get_personality_slot
(
&
bcx
);
let
lp
=
bcx
.load
(
ps
);
bcx
.with_block
(|
bcx
|
{
base
::
call_lifetime_end
(
bcx
,
ps
);
base
::
trans_unwind_resume
(
bcx
,
lp
);
});
base
::
call_lifetime_end
(
&
bcx
,
ps
);
base
::
trans_unwind_resume
(
&
bcx
,
lp
);
}
}
...
...
@@ -143,9 +142,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
mir
::
TerminatorKind
::
Switch
{
ref
discr
,
ref
adt_def
,
ref
targets
}
=>
{
let
discr_lvalue
=
self
.trans_lvalue
(
&
bcx
,
discr
);
let
ty
=
discr_lvalue
.ty
.to_ty
(
bcx
.tcx
());
let
discr
=
bcx
.with_block
(|
bcx
|
adt
::
trans_get_discr
(
bcx
,
ty
,
discr_lvalue
.llval
,
None
,
true
)
);
let
discr
=
adt
::
trans_get_discr
(
&
bcx
,
ty
,
discr_lvalue
.llval
,
None
,
true
);
let
mut
bb_hist
=
FxHashMap
();
for
target
in
targets
{
...
...
@@ -169,8 +166,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
for
(
adt_variant
,
&
target
)
in
adt_def
.variants
.iter
()
.zip
(
targets
)
{
if
default_bb
!=
Some
(
target
)
{
let
llbb
=
llblock
(
self
,
target
);
let
llval
=
bcx
.with_block
(|
bcx
|
adt
::
trans_case
(
bcx
,
ty
,
Disr
::
from
(
adt_variant
.disr_val
)));
let
llval
=
adt
::
trans_case
(
&
bcx
,
ty
,
Disr
::
from
(
adt_variant
.disr_val
));
build
::
AddCase
(
switch
,
llval
,
llbb
)
}
}
...
...
@@ -179,7 +175,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
mir
::
TerminatorKind
::
SwitchInt
{
ref
discr
,
switch_ty
,
ref
values
,
ref
targets
}
=>
{
let
(
otherwise
,
targets
)
=
targets
.split_last
()
.unwrap
();
let
discr
=
bcx
.load
(
self
.trans_lvalue
(
&
bcx
,
discr
)
.llval
);
let
discr
=
b
cx
.with_block
(|
bcx
|
base
::
to_immediate
(
bcx
,
discr
,
switch_ty
)
);
let
discr
=
b
ase
::
to_immediate
(
&
bcx
,
discr
,
switch_ty
);
let
switch
=
bcx
.switch
(
discr
,
llblock
(
self
,
*
otherwise
),
values
.len
());
for
(
value
,
target
)
in
values
.iter
()
.zip
(
targets
)
{
let
val
=
Const
::
from_constval
(
bcx
.ccx
(),
value
.clone
(),
switch_ty
);
...
...
@@ -259,13 +255,11 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
// but I am shooting for a quick fix to #35546
// here that can be cleanly backported to beta, so
// I want to avoid touching all of trans.
bcx
.with_block
(|
bcx
|
{
let
scratch
=
base
::
alloc_ty
(
bcx
,
ty
,
"drop"
);
base
::
call_lifetime_start
(
bcx
,
scratch
);
build
::
Store
(
bcx
,
lvalue
.llval
,
base
::
get_dataptr
(
bcx
,
scratch
));
build
::
Store
(
bcx
,
lvalue
.llextra
,
base
::
get_meta
(
bcx
,
scratch
));
scratch
})
let
scratch
=
base
::
alloc_ty
(
&
bcx
,
ty
,
"drop"
);
base
::
call_lifetime_start
(
&
bcx
,
scratch
);
build
::
Store
(
&
bcx
,
lvalue
.llval
,
base
::
get_dataptr
(
&
bcx
,
scratch
));
build
::
Store
(
&
bcx
,
lvalue
.llextra
,
base
::
get_meta
(
&
bcx
,
scratch
));
scratch
};
if
let
Some
(
unwind
)
=
unwind
{
bcx
.invoke
(
drop_fn
,
...
...
@@ -443,6 +437,65 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
return
;
}
// FIXME: This should proxy to the drop glue in the future when the ABI matches;
// most of the below code was copied from the match arm for TerminatorKind::Drop.
if
intrinsic
==
Some
(
"drop_in_place"
)
{
let
&
(
_
,
target
)
=
destination
.as_ref
()
.unwrap
();
let
ty
=
if
let
ty
::
TyFnDef
(
_
,
substs
,
_
)
=
callee
.ty.sty
{
substs
.type_at
(
0
)
}
else
{
bug!
(
"Unexpected ty: {}"
,
callee
.ty
);
};
// Double check for necessity to drop
if
!
glue
::
type_needs_drop
(
bcx
.tcx
(),
ty
)
{
funclet_br
(
self
,
bcx
,
target
);
return
;
}
let
ptr
=
self
.trans_operand
(
&
bcx
,
&
args
[
0
]);
let
(
llval
,
llextra
)
=
match
ptr
.val
{
Immediate
(
llptr
)
=>
(
llptr
,
ptr
::
null_mut
()),
Pair
(
llptr
,
llextra
)
=>
(
llptr
,
llextra
),
Ref
(
_
)
=>
bug!
(
"Deref of by-Ref type {:?}"
,
ptr
.ty
)
};
let
drop_fn
=
glue
::
get_drop_glue
(
bcx
.ccx
(),
ty
);
let
drop_ty
=
glue
::
get_drop_glue_type
(
bcx
.tcx
(),
ty
);
let
is_sized
=
common
::
type_is_sized
(
bcx
.tcx
(),
ty
);
let
llvalue
=
if
is_sized
{
if
drop_ty
!=
ty
{
bcx
.pointercast
(
llval
,
type_of
::
type_of
(
bcx
.ccx
(),
drop_ty
)
.ptr_to
())
}
else
{
llval
}
}
else
{
// FIXME(#36457) Currently drop glue takes sized
// values as a `*(data, meta)`, but elsewhere in
// MIR we pass `(data, meta)` as two separate
// arguments. It would be better to fix drop glue,
// but I am shooting for a quick fix to #35546
// here that can be cleanly backported to beta, so
// I want to avoid touching all of trans.
let
scratch
=
base
::
alloc_ty
(
&
bcx
,
ty
,
"drop"
);
base
::
call_lifetime_start
(
&
bcx
,
scratch
);
build
::
Store
(
&
bcx
,
llval
,
base
::
get_dataptr
(
&
bcx
,
scratch
));
build
::
Store
(
&
bcx
,
llextra
,
base
::
get_meta
(
&
bcx
,
scratch
));
scratch
};
if
let
Some
(
unwind
)
=
*
cleanup
{
bcx
.invoke
(
drop_fn
,
&
[
llvalue
],
self
.blocks
[
target
]
.llbb
,
llblock
(
self
,
unwind
),
cleanup_bundle
);
}
else
{
bcx
.call
(
drop_fn
,
&
[
llvalue
],
cleanup_bundle
);
funclet_br
(
self
,
bcx
,
target
);
}
return
;
}
if
intrinsic
==
Some
(
"transmute"
)
{
let
&
(
ref
dest
,
target
)
=
destination
.as_ref
()
.unwrap
();
self
.with_lvalue_ref
(
&
bcx
,
dest
,
|
this
,
dest
|
{
...
...
@@ -537,10 +590,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
bug!
(
"Cannot use direct operand with an intrinsic call"
)
};
bcx
.with_block
(|
bcx
|
{
trans_intrinsic_call
(
bcx
,
callee
.ty
,
&
fn_ty
,
&
llargs
,
dest
,
debug_loc
);
});
trans_intrinsic_call
(
&
bcx
,
callee
.ty
,
&
fn_ty
,
&
llargs
,
dest
,
debug_loc
);
if
let
ReturnDest
::
IndirectOperand
(
dst
,
_
)
=
ret_dest
{
// Make a fake operand for store_return
...
...
@@ -554,8 +604,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
if
let
Some
((
_
,
target
))
=
*
destination
{
funclet_br
(
self
,
bcx
,
target
);
}
else
{
// trans_intrinsic_call already used Unreachable.
// bcx.unreachable();
bcx
.unreachable
();
}
return
;
...
...
@@ -620,9 +669,7 @@ fn trans_argument(&mut self,
let
(
ptr
,
meta
)
=
(
a
,
b
);
if
*
next_idx
==
0
{
if
let
Virtual
(
idx
)
=
*
callee
{
let
llfn
=
bcx
.with_block
(|
bcx
|
{
meth
::
get_virtual_method
(
bcx
,
meta
,
idx
)
});
let
llfn
=
meth
::
get_virtual_method
(
bcx
,
meta
,
idx
);
let
llty
=
fn_ty
.llvm_type
(
bcx
.ccx
())
.ptr_to
();
*
callee
=
Fn
(
bcx
.pointercast
(
llfn
,
llty
));
}
...
...
@@ -768,12 +815,10 @@ fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRe
slot
}
else
{
let
llretty
=
Type
::
struct_
(
ccx
,
&
[
Type
::
i8p
(
ccx
),
Type
::
i32
(
ccx
)],
false
);
bcx
.with_block
(|
bcx
|
{
let
slot
=
base
::
alloca
(
bcx
,
llretty
,
"personalityslot"
);
self
.llpersonalityslot
=
Some
(
slot
);
base
::
call_lifetime_start
(
bcx
,
slot
);
slot
})
let
slot
=
base
::
alloca
(
bcx
,
llretty
,
"personalityslot"
);
self
.llpersonalityslot
=
Some
(
slot
);
base
::
call_lifetime_start
(
bcx
,
slot
);
slot
}
}
...
...
@@ -863,18 +908,14 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
return
if
fn_ret_ty
.is_indirect
()
{
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let
tmp
=
bcx
.with_block
(|
bcx
|
{
base
::
alloc_ty
(
bcx
,
ret_ty
,
"tmp_ret"
)
});
let
tmp
=
base
::
alloc_ty
(
bcx
,
ret_ty
,
"tmp_ret"
);
llargs
.push
(
tmp
);
ReturnDest
::
IndirectOperand
(
tmp
,
index
)
}
else
if
is_intrinsic
{
// Currently, intrinsics always need a location to store
// the result. so we create a temporary alloca for the
// result
let
tmp
=
bcx
.with_block
(|
bcx
|
{
base
::
alloc_ty
(
bcx
,
ret_ty
,
"tmp_ret"
)
});
let
tmp
=
base
::
alloc_ty
(
bcx
,
ret_ty
,
"tmp_ret"
);
ReturnDest
::
IndirectOperand
(
tmp
,
index
)
}
else
{
ReturnDest
::
DirectOperand
(
index
)
...
...
@@ -939,9 +980,7 @@ fn store_return(&mut self,
DirectOperand
(
index
)
=>
{
// If there is a cast, we have to store and reload.
let
op
=
if
ret_ty
.cast
.is_some
()
{
let
tmp
=
bcx
.with_block
(|
bcx
|
{
base
::
alloc_ty
(
bcx
,
op
.ty
,
"tmp_ret"
)
});
let
tmp
=
base
::
alloc_ty
(
bcx
,
op
.ty
,
"tmp_ret"
);
ret_ty
.store
(
bcx
,
op
.immediate
(),
tmp
);
self
.trans_load
(
bcx
,
tmp
,
op
.ty
)
}
else
{
...
...
src/librustc_trans/mir/lvalue.rs
浏览文件 @
bf7d4534
...
...
@@ -50,7 +50,7 @@ pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
->
LvalueRef
<
'tcx
>
{
assert
!
(
!
ty
.has_erasable_regions
());
let
lltemp
=
b
cx
.with_block
(|
bcx
|
base
::
alloc_ty
(
bcx
,
ty
,
name
)
);
let
lltemp
=
b
ase
::
alloc_ty
(
bcx
,
ty
,
name
);
LvalueRef
::
new_sized
(
lltemp
,
LvalueTy
::
from_ty
(
ty
))
}
...
...
src/librustc_trans/mir/mod.rs
浏览文件 @
bf7d4534
...
...
@@ -181,7 +181,7 @@ fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
///////////////////////////////////////////////////////////////////////////
pub
fn
trans_mir
<
'blk
,
'tcx
:
'blk
>
(
fcx
:
&
'blk
FunctionContext
<
'blk
,
'tcx
>
)
{
let
bcx
=
fcx
.init
(
true
)
.build
()
;
let
bcx
=
fcx
.init
(
true
);
let
mir
=
bcx
.mir
();
// Analyze the temps to determine which must be lvalues
...
...
@@ -240,11 +240,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
if
dbg
{
let
dbg_loc
=
mircx
.debug_loc
(
source_info
);
if
let
DebugLoc
::
ScopeAt
(
scope
,
span
)
=
dbg_loc
{
bcx
.with_block
(|
bcx
|
{
declare_local
(
bcx
,
name
,
ty
,
scope
,
VariableAccess
::
DirectVariable
{
alloca
:
lvalue
.llval
},
VariableKind
::
LocalVariable
,
span
);
});
declare_local
(
&
bcx
,
name
,
ty
,
scope
,
VariableAccess
::
DirectVariable
{
alloca
:
lvalue
.llval
},
VariableKind
::
LocalVariable
,
span
);
}
else
{
panic!
(
"Unexpected"
);
}
...
...
@@ -353,9 +351,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
_
=>
bug!
(
"spread argument isn't a tuple?!"
)
};
let
lltemp
=
bcx
.with_block
(|
bcx
|
{
base
::
alloc_ty
(
bcx
,
arg_ty
,
&
format!
(
"arg{}"
,
arg_index
))
});
let
lltemp
=
base
::
alloc_ty
(
&
bcx
,
arg_ty
,
&
format!
(
"arg{}"
,
arg_index
));
for
(
i
,
&
tupled_arg_ty
)
in
tupled_arg_tys
.iter
()
.enumerate
()
{
let
dst
=
bcx
.struct_gep
(
lltemp
,
i
);
let
arg
=
&
fcx
.fn_ty.args
[
idx
];
...
...
@@ -376,7 +372,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
// Now that we have one alloca that contains the aggregate value,
// we can create one debuginfo entry for the argument.
bcx
.with_block
(|
bcx
|
arg_scope
.map
(|
scope
|
{
arg_scope
.map
(|
scope
|
{
let
variable_access
=
VariableAccess
::
DirectVariable
{
alloca
:
lltemp
};
...
...
@@ -384,7 +380,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
arg_ty
,
scope
,
variable_access
,
VariableKind
::
ArgumentVariable
(
arg_index
+
1
),
bcx
.fcx
()
.span
.unwrap_or
(
DUMMY_SP
));
})
)
;
});
return
LocalRef
::
Lvalue
(
LvalueRef
::
new_sized
(
lltemp
,
LvalueTy
::
from_ty
(
arg_ty
)));
}
...
...
@@ -433,9 +429,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
};
return
LocalRef
::
Operand
(
Some
(
operand
.unpack_if_pair
(
bcx
)));
}
else
{
let
lltemp
=
bcx
.with_block
(|
bcx
|
{
base
::
alloc_ty
(
bcx
,
arg_ty
,
&
format!
(
"arg{}"
,
arg_index
))
});
let
lltemp
=
base
::
alloc_ty
(
&
bcx
,
arg_ty
,
&
format!
(
"arg{}"
,
arg_index
));
if
common
::
type_is_fat_ptr
(
tcx
,
arg_ty
)
{
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
...
...
@@ -453,7 +447,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
}
lltemp
};
bcx
.with_block
(|
bcx
|
arg_scope
.map
(|
scope
|
{
arg_scope
.map
(|
scope
|
{
// Is this a regular argument?
if
arg_index
>
0
||
mir
.upvar_decls
.is_empty
()
{
declare_local
(
bcx
,
arg_decl
.name
.unwrap_or
(
keywords
::
Invalid
.name
()),
arg_ty
,
...
...
@@ -531,7 +525,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
VariableKind
::
CapturedVariable
,
bcx
.fcx
()
.span
.unwrap_or
(
DUMMY_SP
));
}
})
)
;
});
LocalRef
::
Lvalue
(
LvalueRef
::
new_sized
(
llval
,
LvalueTy
::
from_ty
(
arg_ty
)))
})
.collect
()
}
...
...
src/librustc_trans/mir/operand.rs
浏览文件 @
bf7d4534
...
...
@@ -14,7 +14,7 @@
use
rustc_data_structures
::
indexed_vec
::
Idx
;
use
base
;
use
common
::{
self
,
Block
,
Block
AndBuilder
};
use
common
::{
self
,
BlockAndBuilder
};
use
value
::
Value
;
use
type_of
;
use
type_
::
Type
;
...
...
@@ -247,11 +247,11 @@ pub fn store_operand(&mut self,
operand
:
OperandRef
<
'tcx
>
)
{
debug!
(
"store_operand: operand={:?} lldest={:?}"
,
operand
,
lldest
);
bcx
.with_block
(|
bcx
|
self
.store_operand_direct
(
bcx
,
lldest
,
operand
)
)
self
.store_operand_direct
(
bcx
,
lldest
,
operand
)
}
pub
fn
store_operand_direct
(
&
mut
self
,
bcx
:
Block
<
'bcx
,
'tcx
>
,
bcx
:
&
BlockAndBuilder
<
'bcx
,
'tcx
>
,
lldest
:
ValueRef
,
operand
:
OperandRef
<
'tcx
>
)
{
...
...
src/librustc_trans/mir/rvalue.rs
浏览文件 @
bf7d4534
...
...
@@ -17,7 +17,7 @@
use
asm
;
use
base
;
use
callee
::
Callee
;
use
common
::{
self
,
val_ty
,
C_bool
,
C_null
,
C_uint
,
BlockAndBuilder
,
Result
};
use
common
::{
self
,
val_ty
,
C_bool
,
C_null
,
C_uint
,
BlockAndBuilder
};
use
common
::{
C_integral
};
use
debuginfo
::
DebugLoc
;
use
adt
;
...
...
@@ -70,30 +70,28 @@ pub fn trans_rvalue(&mut self,
// so the (generic) MIR may not be able to expand it.
let
operand
=
self
.trans_operand
(
&
bcx
,
source
);
let
operand
=
operand
.pack_if_pair
(
&
bcx
);
bcx
.with_block
(|
bcx
|
{
match
operand
.val
{
OperandValue
::
Pair
(
..
)
=>
bug!
(),
OperandValue
::
Immediate
(
llval
)
=>
{
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!
(
"trans_rvalue: creating ugly alloca"
);
let
lltemp
=
base
::
alloc_ty
(
bcx
,
operand
.ty
,
"__unsize_temp"
);
base
::
store_ty
(
bcx
,
llval
,
lltemp
,
operand
.ty
);
base
::
coerce_unsized_into
(
bcx
,
lltemp
,
operand
.ty
,
dest
.llval
,
cast_ty
);
}
OperandValue
::
Ref
(
llref
)
=>
{
base
::
coerce_unsized_into
(
bcx
,
llref
,
operand
.ty
,
dest
.llval
,
cast_ty
);
}
match
operand
.val
{
OperandValue
::
Pair
(
..
)
=>
bug!
(),
OperandValue
::
Immediate
(
llval
)
=>
{
// unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use extractvalue to
// index into the struct, and this case isn't
// important enough for it.
debug!
(
"trans_rvalue: creating ugly alloca"
);
let
lltemp
=
base
::
alloc_ty
(
&
bcx
,
operand
.ty
,
"__unsize_temp"
);
base
::
store_ty
(
&
bcx
,
llval
,
lltemp
,
operand
.ty
);
base
::
coerce_unsized_into
(
&
bcx
,
lltemp
,
operand
.ty
,
dest
.llval
,
cast_ty
);
}
});
OperandValue
::
Ref
(
llref
)
=>
{
base
::
coerce_unsized_into
(
&
bcx
,
llref
,
operand
.ty
,
dest
.llval
,
cast_ty
);
}
}
bcx
}
...
...
@@ -102,11 +100,9 @@ pub fn trans_rvalue(&mut self,
let
size
=
count
.value
.as_u64
(
bcx
.tcx
()
.sess.target.uint_type
);
let
size
=
C_uint
(
bcx
.ccx
(),
size
);
let
base
=
base
::
get_dataptr_builder
(
&
bcx
,
dest
.llval
);
let
bcx
=
bcx
.map_block
(|
block
|
{
tvec
::
slice_for_each
(
block
,
base
,
tr_elem
.ty
,
size
,
|
block
,
llslot
|
{
self
.store_operand_direct
(
block
,
llslot
,
tr_elem
);
block
})
let
bcx
=
tvec
::
slice_for_each
(
bcx
,
base
,
tr_elem
.ty
,
size
,
|
bcx
,
llslot
|
{
self
.store_operand_direct
(
&
bcx
,
llslot
,
tr_elem
);
bcx
});
bcx
}
...
...
@@ -115,10 +111,8 @@ pub fn trans_rvalue(&mut self,
match
*
kind
{
mir
::
AggregateKind
::
Adt
(
adt_def
,
variant_index
,
_
,
active_field_index
)
=>
{
let
disr
=
Disr
::
from
(
adt_def
.variants
[
variant_index
]
.disr_val
);
bcx
.with_block
(|
bcx
|
{
adt
::
trans_set_discr
(
bcx
,
dest
.ty
.to_ty
(
bcx
.tcx
()),
dest
.llval
,
Disr
::
from
(
disr
));
});
adt
::
trans_set_discr
(
&
bcx
,
dest
.ty
.to_ty
(
bcx
.tcx
()),
dest
.llval
,
Disr
::
from
(
disr
));
for
(
i
,
operand
)
in
operands
.iter
()
.enumerate
()
{
let
op
=
self
.trans_operand
(
&
bcx
,
operand
);
// Do not generate stores and GEPis for zero-sized fields.
...
...
@@ -171,10 +165,7 @@ pub fn trans_rvalue(&mut self,
self
.trans_operand
(
&
bcx
,
input
)
.immediate
()
})
.collect
();
bcx
.with_block
(|
bcx
|
{
asm
::
trans_inline_asm
(
bcx
,
asm
,
outputs
,
input_vals
);
});
asm
::
trans_inline_asm
(
&
bcx
,
asm
,
outputs
,
input_vals
);
bcx
}
...
...
@@ -238,10 +229,8 @@ pub fn trans_rvalue_operand(&mut self,
}
OperandValue
::
Immediate
(
lldata
)
=>
{
// "standard" unsize
let
(
lldata
,
llextra
)
=
bcx
.with_block
(|
bcx
|
{
base
::
unsize_thin_ptr
(
bcx
,
lldata
,
operand
.ty
,
cast_ty
)
});
let
(
lldata
,
llextra
)
=
base
::
unsize_thin_ptr
(
&
bcx
,
lldata
,
operand
.ty
,
cast_ty
);
OperandValue
::
Pair
(
lldata
,
llextra
)
}
OperandValue
::
Ref
(
_
)
=>
{
...
...
@@ -281,9 +270,7 @@ pub fn trans_rvalue_operand(&mut self,
let
discr
=
match
operand
.val
{
OperandValue
::
Immediate
(
llval
)
=>
llval
,
OperandValue
::
Ref
(
llptr
)
=>
{
bcx
.with_block
(|
bcx
|
{
adt
::
trans_get_discr
(
bcx
,
operand
.ty
,
llptr
,
None
,
true
)
})
adt
::
trans_get_discr
(
&
bcx
,
operand
.ty
,
llptr
,
None
,
true
)
}
OperandValue
::
Pair
(
..
)
=>
bug!
(
"Unexpected Pair operand"
)
};
...
...
@@ -468,19 +455,16 @@ pub fn trans_rvalue_operand(&mut self,
let
llalign
=
C_uint
(
bcx
.ccx
(),
align
);
let
llty_ptr
=
llty
.ptr_to
();
let
box_ty
=
bcx
.tcx
()
.mk_box
(
content_ty
);
let
mut
llval
=
None
;
let
bcx
=
bcx
.map_block
(|
bcx
|
{
let
Result
{
bcx
,
val
}
=
base
::
malloc_raw_dyn
(
bcx
,
llty_ptr
,
box_ty
,
llsize
,
llalign
,
debug_loc
);
llval
=
Some
(
val
);
bcx
});
let
val
=
base
::
malloc_raw_dyn
(
&
bcx
,
llty_ptr
,
box_ty
,
llsize
,
llalign
,
debug_loc
);
let
operand
=
OperandRef
{
val
:
OperandValue
::
Immediate
(
llval
.unwrap
()
),
val
:
OperandValue
::
Immediate
(
val
),
ty
:
box_ty
,
};
(
bcx
,
operand
)
...
...
@@ -543,21 +527,21 @@ pub fn trans_scalar_binop(&mut self,
mir
::
BinOp
::
BitAnd
=>
bcx
.and
(
lhs
,
rhs
),
mir
::
BinOp
::
BitXor
=>
bcx
.xor
(
lhs
,
rhs
),
mir
::
BinOp
::
Shl
=>
{
bcx
.with_block
(|
bcx
|
{
common
::
build_unchecked_lshift
(
bcx
,
lhs
,
rhs
,
DebugLoc
::
None
)
}
)
common
::
build_unchecked_lshift
(
&
bcx
,
lhs
,
rhs
,
DebugLoc
::
None
)
}
mir
::
BinOp
::
Shr
=>
{
bcx
.with_block
(|
bcx
|
{
common
::
build_unchecked_rshift
(
bcx
,
input_ty
,
lhs
,
rhs
,
DebugLoc
::
None
)
}
)
common
::
build_unchecked_rshift
(
bcx
,
input_ty
,
lhs
,
rhs
,
DebugLoc
::
None
)
}
mir
::
BinOp
::
Ne
|
mir
::
BinOp
::
Lt
|
mir
::
BinOp
::
Gt
|
mir
::
BinOp
::
Eq
|
mir
::
BinOp
::
Le
|
mir
::
BinOp
::
Ge
=>
if
is_nil
{
...
...
@@ -677,9 +661,7 @@ pub fn trans_scalar_checked_binop(&mut self,
mir
::
BinOp
::
Shl
|
mir
::
BinOp
::
Shr
=>
{
let
lhs_llty
=
val_ty
(
lhs
);
let
rhs_llty
=
val_ty
(
rhs
);
let
invert_mask
=
bcx
.with_block
(|
bcx
|
{
common
::
shift_mask_val
(
bcx
,
lhs_llty
,
rhs_llty
,
true
)
});
let
invert_mask
=
common
::
shift_mask_val
(
&
bcx
,
lhs_llty
,
rhs_llty
,
true
);
let
outer_bits
=
bcx
.and
(
rhs
,
invert_mask
);
let
of
=
bcx
.icmp
(
llvm
::
IntNE
,
outer_bits
,
C_null
(
rhs_llty
));
...
...
src/librustc_trans/mir/statement.rs
浏览文件 @
bf7d4534
...
...
@@ -63,12 +63,10 @@ pub fn trans_statement(&mut self,
mir
::
StatementKind
::
SetDiscriminant
{
ref
lvalue
,
variant_index
}
=>
{
let
ty
=
self
.monomorphized_lvalue_ty
(
lvalue
);
let
lvalue_transed
=
self
.trans_lvalue
(
&
bcx
,
lvalue
);
bcx
.with_block
(|
bcx
|
adt
::
trans_set_discr
(
bcx
,
ty
,
lvalue_transed
.llval
,
Disr
::
from
(
variant_index
))
);
adt
::
trans_set_discr
(
&
bcx
,
ty
,
lvalue_transed
.llval
,
Disr
::
from
(
variant_index
));
bcx
}
mir
::
StatementKind
::
StorageLive
(
ref
lvalue
)
=>
{
...
...
src/librustc_trans/tvec.rs
浏览文件 @
bf7d4534
...
...
@@ -18,16 +18,16 @@
use
debuginfo
::
DebugLoc
;
use
rustc
::
ty
::
Ty
;
pub
fn
slice_for_each
<
'blk
,
'tcx
,
F
>
(
bcx
:
Block
<
'blk
,
'tcx
>
,
pub
fn
slice_for_each
<
'blk
,
'tcx
,
F
>
(
bcx
:
Block
AndBuilder
<
'blk
,
'tcx
>
,
data_ptr
:
ValueRef
,
unit_ty
:
Ty
<
'tcx
>
,
len
:
ValueRef
,
f
:
F
)
->
Block
<
'blk
,
'tcx
>
where
F
:
FnOnce
(
Block
<
'blk
,
'tcx
>
,
ValueRef
)
->
Block
<
'blk
,
'tcx
>
,
->
Block
AndBuilder
<
'blk
,
'tcx
>
where
F
:
FnOnce
(
BlockAndBuilder
<
'blk
,
'tcx
>
,
ValueRef
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
,
{
let
_
icx
=
push_ctxt
(
"tvec::slice_for_each"
);
let
fcx
=
bcx
.fcx
;
let
fcx
=
bcx
.fcx
()
;
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let
zst
=
type_is_zero_size
(
bcx
.ccx
(),
unit_ty
);
...
...
@@ -37,27 +37,33 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
InBoundsGEP
(
bcx
,
a
,
&
[
b
])
};
let
header_bcx
=
fcx
.new_block
(
"slice_loop_header"
);
let
body_bcx
=
fcx
.new_block
(
"slice_loop_body"
);
let
next_bcx
=
fcx
.new_block
(
"slice_loop_next"
);
let
body_bcx
=
fcx
.new_block
(
"slice_loop_body"
)
.build
(
);
let
next_bcx
=
fcx
.new_block
(
"slice_loop_next"
)
.build
(
);
let
header_bcx
=
fcx
.new_block
(
"slice_loop_header"
)
.build
(
);
let
start
=
if
zst
{
C_uint
(
bcx
.ccx
(),
0
as
usize
)
}
else
{
data_ptr
};
let
end
=
add
(
bcx
,
start
,
len
);
let
end
=
add
(
&
bcx
,
start
,
len
);
Br
(
bcx
,
header_bcx
.llbb
,
DebugLoc
::
None
);
let
current
=
Phi
(
header_bcx
,
val_ty
(
start
),
&
[
start
],
&
[
bcx
.llbb
]);
Br
(
&
bcx
,
header_bcx
.llbb
()
,
DebugLoc
::
None
);
let
current
=
Phi
(
&
header_bcx
,
val_ty
(
start
),
&
[
start
],
&
[
bcx
.llbb
()
]);
let
keep_going
=
ICmp
(
header_bcx
,
llvm
::
IntNE
,
current
,
end
,
DebugLoc
::
None
);
CondBr
(
header_bcx
,
keep_going
,
body_bcx
.llbb
,
next_bcx
.llbb
,
DebugLoc
::
None
);
ICmp
(
&
header_bcx
,
llvm
::
IntNE
,
current
,
end
,
DebugLoc
::
None
);
CondBr
(
&
header_bcx
,
keep_going
,
body_bcx
.llbb
(),
next_bcx
.llbb
()
,
DebugLoc
::
None
);
let
body_bcx
=
f
(
body_bcx
,
if
zst
{
data_ptr
}
else
{
current
});
let
next
=
add
(
body_bcx
,
current
,
C_uint
(
bcx
.ccx
(),
1u
size
));
AddIncomingToPhi
(
current
,
next
,
body_bcx
.llbb
);
Br
(
body_bcx
,
header_bcx
.llbb
,
DebugLoc
::
None
);
// FIXME(simulacrum): The code below is identical to the closure (add) above, but using the
// closure doesn't compile due to body_bcx still being borrowed when dropped.
let
next
=
if
zst
{
Add
(
&
body_bcx
,
current
,
C_uint
(
bcx
.ccx
(),
1u
size
),
DebugLoc
::
None
)
}
else
{
InBoundsGEP
(
&
body_bcx
,
current
,
&
[
C_uint
(
bcx
.ccx
(),
1u
size
)])
};
AddIncomingToPhi
(
current
,
next
,
body_bcx
.llbb
());
Br
(
&
body_bcx
,
header_bcx
.llbb
(),
DebugLoc
::
None
);
next_bcx
}
src/librustc_trans/value.rs
浏览文件 @
bf7d4534
...
...
@@ -11,7 +11,7 @@
use
llvm
;
use
llvm
::{
UseRef
,
ValueRef
};
use
basic_block
::
BasicBlock
;
use
common
::
Block
;
use
common
::
Block
AndBuilder
;
use
std
::
fmt
;
...
...
@@ -65,11 +65,11 @@ pub fn erase_from_parent(self) {
/// This only performs a search for a trivially dominating store. The store
/// must be the only user of this value, and there must not be any conditional
/// branches between the store and the given block.
pub
fn
get_dominating_store
(
self
,
bcx
:
Block
)
->
Option
<
Value
>
{
pub
fn
get_dominating_store
(
self
,
bcx
:
&
BlockAndBuilder
)
->
Option
<
Value
>
{
match
self
.get_single_user
()
.and_then
(|
user
|
user
.as_store_inst
())
{
Some
(
store
)
=>
{
store
.get_parent
()
.and_then
(|
store_bb
|
{
let
mut
bb
=
BasicBlock
(
bcx
.llbb
);
let
mut
bb
=
BasicBlock
(
bcx
.llbb
()
);
let
mut
ret
=
Some
(
store
);
while
bb
.get
()
!=
store_bb
.get
()
{
match
bb
.get_single_predecessor
()
{
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录