Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
int
Rust
提交
59ef51c1
R
Rust
项目概览
int
/
Rust
接近 1 年 前同步成功
通知
1
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
DevOps
流水线
流水线任务
计划
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
R
Rust
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
DevOps
DevOps
流水线
流水线任务
计划
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
流水线任务
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
59ef51c1
编写于
12月 11, 2016
作者:
M
Mark-Simulacrum
提交者:
Mark Simulacrum
12月 20, 2016
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Replace build.rs with calling functions on builder directly
上级
3f17ab96
变更
19
展开全部
隐藏空白更改
内联
并排
Showing
19 changed file
with
373 addition
and
1167 deletion
+373
-1167
src/librustc_trans/abi.rs
src/librustc_trans/abi.rs
+1
-2
src/librustc_trans/adt.rs
src/librustc_trans/adt.rs
+14
-15
src/librustc_trans/asm.rs
src/librustc_trans/asm.rs
+11
-11
src/librustc_trans/base.rs
src/librustc_trans/base.rs
+62
-59
src/librustc_trans/build.rs
src/librustc_trans/build.rs
+0
-734
src/librustc_trans/builder.rs
src/librustc_trans/builder.rs
+14
-0
src/librustc_trans/callee.rs
src/librustc_trans/callee.rs
+9
-13
src/librustc_trans/cleanup.rs
src/librustc_trans/cleanup.rs
+16
-20
src/librustc_trans/common.rs
src/librustc_trans/common.rs
+17
-15
src/librustc_trans/glue.rs
src/librustc_trans/glue.rs
+45
-59
src/librustc_trans/intrinsic.rs
src/librustc_trans/intrinsic.rs
+145
-166
src/librustc_trans/lib.rs
src/librustc_trans/lib.rs
+0
-1
src/librustc_trans/meth.rs
src/librustc_trans/meth.rs
+3
-6
src/librustc_trans/mir/block.rs
src/librustc_trans/mir/block.rs
+9
-9
src/librustc_trans/mir/mod.rs
src/librustc_trans/mir/mod.rs
+1
-2
src/librustc_trans/mir/operand.rs
src/librustc_trans/mir/operand.rs
+2
-3
src/librustc_trans/mir/rvalue.rs
src/librustc_trans/mir/rvalue.rs
+7
-32
src/librustc_trans/mir/statement.rs
src/librustc_trans/mir/statement.rs
+4
-5
src/librustc_trans/tvec.rs
src/librustc_trans/tvec.rs
+13
-15
未找到文件。
src/librustc_trans/abi.rs
浏览文件 @
59ef51c1
...
...
@@ -10,7 +10,6 @@
use
llvm
::{
self
,
ValueRef
,
Integer
,
Pointer
,
Float
,
Double
,
Struct
,
Array
,
Vector
,
AttributePlace
};
use
base
;
use
build
::
AllocaFcx
;
use
common
::{
type_is_fat_ptr
,
BlockAndBuilder
,
C_uint
};
use
context
::
CrateContext
;
use
cabi_x86
;
...
...
@@ -278,7 +277,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
let
llscratch
=
AllocaFcx
(
bcx
.fcx
(),
ty
,
"abi_cast"
);
let
llscratch
=
bcx
.fcx
()
.alloca
(
ty
,
"abi_cast"
);
base
::
Lifetime
::
Start
.call
(
bcx
,
llscratch
);
// ...where we first store the value...
...
...
src/librustc_trans/adt.rs
浏览文件 @
59ef51c1
...
...
@@ -48,7 +48,6 @@
use
llvm
::{
ValueRef
,
True
,
IntEQ
,
IntNE
};
use
rustc
::
ty
::
layout
;
use
rustc
::
ty
::{
self
,
Ty
,
AdtKind
};
use
build
::
*
;
use
common
::
*
;
use
debuginfo
::
DebugLoc
;
use
glue
;
...
...
@@ -348,7 +347,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
load_discr
(
bcx
,
discr
,
scrutinee
,
min
,
max
,
range_assert
)
}
layout
::
General
{
discr
,
..
}
=>
{
let
ptr
=
StructGEP
(
bcx
,
scrutinee
,
0
);
let
ptr
=
bcx
.struct_gep
(
scrutinee
,
0
);
load_discr
(
bcx
,
discr
,
ptr
,
0
,
def
.variants
.len
()
as
u64
-
1
,
range_assert
)
}
...
...
@@ -358,7 +357,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
let
llptrty
=
type_of
::
sizing_type_of
(
bcx
.ccx
(),
monomorphize
::
field_ty
(
bcx
.ccx
()
.tcx
(),
substs
,
&
def
.variants
[
nndiscr
as
usize
]
.fields
[
0
]));
ICmp
(
bcx
,
cmp
,
Load
(
bcx
,
scrutinee
),
C_null
(
llptrty
),
DebugLoc
::
None
)
bcx
.icmp
(
cmp
,
bcx
.load
(
scrutinee
),
C_null
(
llptrty
)
)
}
layout
::
StructWrappedNullablePointer
{
nndiscr
,
ref
discrfield
,
..
}
=>
{
struct_wrapped_nullable_bitdiscr
(
bcx
,
nndiscr
,
discrfield
,
scrutinee
)
...
...
@@ -367,7 +366,7 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
};
match
cast_to
{
None
=>
val
,
Some
(
llty
)
=>
if
is_discr_signed
(
&
l
)
{
SExt
(
bcx
,
val
,
llty
)
}
else
{
ZExt
(
bcx
,
val
,
llty
)
}
Some
(
llty
)
=>
if
is_discr_signed
(
&
l
)
{
bcx
.sext
(
val
,
llty
)
}
else
{
bcx
.zext
(
val
,
llty
)
}
}
}
...
...
@@ -377,11 +376,11 @@ fn struct_wrapped_nullable_bitdiscr(
discrfield
:
&
layout
::
FieldPath
,
scrutinee
:
ValueRef
)
->
ValueRef
{
let
llptrptr
=
GEPi
(
bcx
,
scrutinee
,
let
llptrptr
=
bcx
.gepi
(
scrutinee
,
&
discrfield
.iter
()
.map
(|
f
|
*
f
as
usize
)
.collect
::
<
Vec
<
_
>>
()[
..
]);
let
llptr
=
Load
(
bcx
,
llptrptr
);
let
llptr
=
bcx
.load
(
llptrptr
);
let
cmp
=
if
nndiscr
==
0
{
IntEQ
}
else
{
IntNE
};
ICmp
(
bcx
,
cmp
,
llptr
,
C_null
(
val_ty
(
llptr
)),
DebugLoc
::
None
)
bcx
.icmp
(
cmp
,
llptr
,
C_null
(
val_ty
(
llptr
))
)
}
/// Helper for cases where the discriminant is simply loaded.
...
...
@@ -401,11 +400,11 @@ fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
// type, which is pointless).
Load
(
bcx
,
ptr
)
bcx
.load
(
ptr
)
}
else
{
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
LoadRangeAssert
(
bcx
,
ptr
,
min
,
max
.wrapping_add
(
1
),
/* signed: */
True
)
bcx
.load_range_assert
(
ptr
,
min
,
max
.wrapping_add
(
1
),
/* signed: */
True
)
}
}
...
...
@@ -440,12 +439,12 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
match
*
l
{
layout
::
CEnum
{
discr
,
min
,
max
,
..
}
=>
{
assert_discr_in_range
(
Disr
(
min
),
Disr
(
max
),
to
);
Store
(
bcx
,
C_integral
(
Type
::
from_integer
(
bcx
.ccx
(),
discr
),
to
.0
,
true
),
bcx
.store
(
C_integral
(
Type
::
from_integer
(
bcx
.ccx
(),
discr
),
to
.0
,
true
),
val
);
}
layout
::
General
{
discr
,
..
}
=>
{
Store
(
bcx
,
C_integral
(
Type
::
from_integer
(
bcx
.ccx
(),
discr
),
to
.0
,
true
),
StructGEP
(
bcx
,
val
,
0
));
bcx
.store
(
C_integral
(
Type
::
from_integer
(
bcx
.ccx
(),
discr
),
to
.0
,
true
),
bcx
.struct_gep
(
val
,
0
));
}
layout
::
Univariant
{
..
}
|
layout
::
UntaggedUnion
{
..
}
...
...
@@ -456,7 +455,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
let
nnty
=
compute_fields
(
bcx
.ccx
(),
t
,
nndiscr
as
usize
,
false
)[
0
];
if
to
.0
!=
nndiscr
{
let
llptrty
=
type_of
::
sizing_type_of
(
bcx
.ccx
(),
nnty
);
Store
(
bcx
,
C_null
(
llptrty
),
val
);
bcx
.store
(
C_null
(
llptrty
),
val
);
}
}
layout
::
StructWrappedNullablePointer
{
nndiscr
,
ref
discrfield
,
ref
nonnull
,
..
}
=>
{
...
...
@@ -472,9 +471,9 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx
base
::
call_memset
(
bcx
,
llptr
,
fill_byte
,
size
,
align
,
false
);
}
else
{
let
path
=
discrfield
.iter
()
.map
(|
&
i
|
i
as
usize
)
.collect
::
<
Vec
<
_
>>
();
let
llptrptr
=
GEPi
(
bcx
,
val
,
&
path
[
..
]);
let
llptrptr
=
bcx
.gepi
(
val
,
&
path
[
..
]);
let
llptrty
=
val_ty
(
llptrptr
)
.element_type
();
Store
(
bcx
,
C_null
(
llptrty
),
llptrptr
);
bcx
.store
(
C_null
(
llptrty
),
llptrptr
);
}
}
}
...
...
src/librustc_trans/asm.rs
浏览文件 @
59ef51c1
...
...
@@ -12,7 +12,6 @@
use
llvm
::{
self
,
ValueRef
};
use
base
;
use
build
::
*
;
use
common
::
*
;
use
type_of
;
use
type_
::
Type
;
...
...
@@ -90,20 +89,21 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let
asm
=
CString
::
new
(
ia
.asm
.as_str
()
.as_bytes
())
.unwrap
();
let
constraint_cstr
=
CString
::
new
(
all_constraints
)
.unwrap
();
let
r
=
InlineAsmCall
(
bcx
,
asm
.as_ptr
(),
constraint_cstr
.as_ptr
(),
&
inputs
,
output_type
,
ia
.volatile
,
ia
.alignstack
,
dialect
);
let
r
=
bcx
.inline_asm_call
(
asm
.as_ptr
(),
constraint_cstr
.as_ptr
(),
&
inputs
,
output_type
,
ia
.volatile
,
ia
.alignstack
,
dialect
);
// Again, based on how many outputs we have
let
outputs
=
ia
.outputs
.iter
()
.zip
(
&
outputs
)
.filter
(|
&
(
ref
o
,
_
)|
!
o
.is_indirect
);
for
(
i
,
(
_
,
&
(
val
,
_
)))
in
outputs
.enumerate
()
{
let
v
=
if
num_outputs
==
1
{
r
}
else
{
ExtractValue
(
bcx
,
r
,
i
)
};
Store
(
bcx
,
v
,
val
);
let
v
=
if
num_outputs
==
1
{
r
}
else
{
bcx
.extract_value
(
r
,
i
)
};
bcx
.store
(
v
,
val
);
}
// Store expn_id in a metadata node so we can map LLVM errors
...
...
src/librustc_trans/base.rs
浏览文件 @
59ef51c1
...
...
@@ -51,7 +51,6 @@
use
abi
::{
self
,
Abi
,
FnType
};
use
adt
;
use
attributes
;
use
build
::
*
;
use
builder
::{
Builder
,
noname
};
use
callee
::{
Callee
};
use
common
::{
BlockAndBuilder
,
C_bool
,
C_bytes_in_context
,
C_i32
,
C_uint
};
...
...
@@ -174,11 +173,11 @@ fn drop(&mut self) {
}
pub
fn
get_meta
(
bcx
:
&
BlockAndBuilder
,
fat_ptr
:
ValueRef
)
->
ValueRef
{
StructGEP
(
bcx
,
fat_ptr
,
abi
::
FAT_PTR_EXTRA
)
bcx
.struct_gep
(
fat_ptr
,
abi
::
FAT_PTR_EXTRA
)
}
pub
fn
get_dataptr
(
bcx
:
&
BlockAndBuilder
,
fat_ptr
:
ValueRef
)
->
ValueRef
{
StructGEP
(
bcx
,
fat_ptr
,
abi
::
FAT_PTR_ADDR
)
bcx
.struct_gep
(
fat_ptr
,
abi
::
FAT_PTR_ADDR
)
}
pub
fn
get_meta_builder
(
b
:
&
Builder
,
fat_ptr
:
ValueRef
)
->
ValueRef
{
...
...
@@ -207,15 +206,14 @@ pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
llty_ptr
:
Type
,
info_ty
:
Ty
<
'tcx
>
,
size
:
ValueRef
,
align
:
ValueRef
,
debug_loc
:
DebugLoc
)
align
:
ValueRef
)
->
ValueRef
{
let
_
icx
=
push_ctxt
(
"malloc_raw_exchange"
);
// Allocate space:
let
def_id
=
require_alloc_fn
(
bcx
,
info_ty
,
ExchangeMallocFnLangItem
);
let
r
=
Callee
::
def
(
bcx
.ccx
(),
def_id
,
bcx
.tcx
()
.intern_substs
(
&
[]))
.reify
(
bcx
.ccx
());
PointerCast
(
bcx
,
Call
(
bcx
,
r
,
&
[
size
,
align
],
debug_loc
),
llty_ptr
)
bcx
.pointercast
(
bcx
.call
(
r
,
&
[
size
,
align
],
bcx
.lpad
()
.and_then
(|
b
|
b
.bundle
())
),
llty_ptr
)
}
...
...
@@ -258,13 +256,12 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
rhs
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
ret_ty
:
Type
,
op
:
hir
::
BinOp_
,
debug_loc
:
DebugLoc
)
op
:
hir
::
BinOp_
)
->
ValueRef
{
let
signed
=
match
t
.sty
{
ty
::
TyFloat
(
_
)
=>
{
let
cmp
=
bin_op_to_fcmp_predicate
(
op
);
return
SExt
(
bcx
,
FCmp
(
bcx
,
cmp
,
lhs
,
rhs
,
debug_loc
),
ret_ty
);
return
bcx
.sext
(
bcx
.fcmp
(
cmp
,
lhs
,
rhs
),
ret_ty
);
},
ty
::
TyUint
(
_
)
=>
false
,
ty
::
TyInt
(
_
)
=>
true
,
...
...
@@ -276,7 +273,7 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// to get the correctly sized type. This will compile to a single instruction
// once the IR is converted to assembly if the SIMD instruction is supported
// by the target architecture.
SExt
(
bcx
,
ICmp
(
bcx
,
cmp
,
lhs
,
rhs
,
debug_loc
),
ret_ty
)
bcx
.sext
(
bcx
.icmp
(
cmp
,
lhs
,
rhs
),
ret_ty
)
}
/// Retrieve the information we are losing (making dynamic) in an unsizing
...
...
@@ -326,8 +323,7 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
&
ty
::
TyRawPtr
(
ty
::
TypeAndMut
{
ty
:
b
,
..
}))
=>
{
assert
!
(
common
::
type_is_sized
(
bcx
.tcx
(),
a
));
let
ptr_ty
=
type_of
::
in_memory_type_of
(
bcx
.ccx
(),
b
)
.ptr_to
();
(
PointerCast
(
bcx
,
src
,
ptr_ty
),
unsized_info
(
bcx
.ccx
(),
a
,
b
,
None
))
(
bcx
.pointercast
(
src
,
ptr_ty
),
unsized_info
(
bcx
.ccx
(),
a
,
b
,
None
))
}
_
=>
bug!
(
"unsize_thin_ptr: called on bad types"
),
}
...
...
@@ -352,7 +348,7 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// the types match up.
let
(
base
,
info
)
=
load_fat_ptr
(
bcx
,
src
,
src_ty
);
let
llcast_ty
=
type_of
::
fat_ptr_base_ty
(
bcx
.ccx
(),
dst_ty
);
let
base
=
PointerCast
(
bcx
,
base
,
llcast_ty
);
let
base
=
bcx
.pointercast
(
base
,
llcast_ty
);
(
base
,
info
)
}
else
{
let
base
=
load_ty
(
bcx
,
src
,
src_ty
);
...
...
@@ -414,8 +410,10 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx
}
}
pub
fn
cast_shift_expr_rhs
(
cx
:
&
BlockAndBuilder
,
op
:
hir
::
BinOp_
,
lhs
:
ValueRef
,
rhs
:
ValueRef
)
->
ValueRef
{
cast_shift_rhs
(
op
,
lhs
,
rhs
,
|
a
,
b
|
Trunc
(
cx
,
a
,
b
),
|
a
,
b
|
ZExt
(
cx
,
a
,
b
))
pub
fn
cast_shift_expr_rhs
(
cx
:
&
BlockAndBuilder
,
op
:
hir
::
BinOp_
,
lhs
:
ValueRef
,
rhs
:
ValueRef
)
->
ValueRef
{
cast_shift_rhs
(
op
,
lhs
,
rhs
,
|
a
,
b
|
cx
.trunc
(
a
,
b
),
|
a
,
b
|
cx
.zext
(
a
,
b
))
}
pub
fn
cast_shift_const_rhs
(
op
:
hir
::
BinOp_
,
lhs
:
ValueRef
,
rhs
:
ValueRef
)
->
ValueRef
{
...
...
@@ -463,8 +461,7 @@ fn cast_shift_rhs<F, G>(op: hir::BinOp_,
pub
fn
invoke
<
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
llfn
:
ValueRef
,
llargs
:
&
[
ValueRef
],
debug_loc
:
DebugLoc
)
llargs
:
&
[
ValueRef
])
->
(
ValueRef
,
BlockAndBuilder
<
'blk
,
'tcx
>
)
{
let
_
icx
=
push_ctxt
(
"invoke_"
);
if
need_invoke
(
&
bcx
)
{
...
...
@@ -475,12 +472,13 @@ pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
let
normal_bcx
=
bcx
.fcx
()
.new_block
(
"normal-return"
);
let
landing_pad
=
bcx
.fcx
()
.get_landing_pad
();
let
llresult
=
Invoke
(
&
bcx
,
llfn
,
&
llargs
[
..
],
normal_bcx
.llbb
,
landing_pad
,
debug_loc
);
let
llresult
=
bcx
.invoke
(
llfn
,
&
llargs
[
..
],
normal_bcx
.llbb
,
landing_pad
,
bcx
.lpad
()
.and_then
(|
b
|
b
.bundle
())
);
return
(
llresult
,
normal_bcx
.build
());
}
else
{
debug!
(
"calling {:?} at {:?}"
,
Value
(
llfn
),
bcx
.llbb
());
...
...
@@ -488,7 +486,7 @@ pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
debug!
(
"arg: {:?}"
,
Value
(
llarg
));
}
let
llresult
=
Call
(
&
bcx
,
llfn
,
&
llargs
[
..
],
debug_loc
);
let
llresult
=
bcx
.call
(
llfn
,
&
llargs
[
..
],
bcx
.lpad
()
.and_then
(|
b
|
b
.bundle
())
);
return
(
llresult
,
bcx
);
}
}
...
...
@@ -518,7 +516,9 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
pub
fn
load_ty
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ptr
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
->
ValueRef
{
pub
fn
load_ty
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ptr
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
->
ValueRef
{
load_ty_builder
(
cx
,
ptr
,
t
)
}
...
...
@@ -557,15 +557,17 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub
fn
store_ty
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
dst
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
pub
fn
store_ty
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
dst
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
debug!
(
"store_ty: {:?} : {:?} <- {:?}"
,
Value
(
dst
),
t
,
Value
(
v
));
if
common
::
type_is_fat_ptr
(
cx
.tcx
(),
t
)
{
let
lladdr
=
ExtractValue
(
cx
,
v
,
abi
::
FAT_PTR_ADDR
);
let
llextra
=
ExtractValue
(
cx
,
v
,
abi
::
FAT_PTR_EXTRA
);
let
lladdr
=
cx
.extract_value
(
v
,
abi
::
FAT_PTR_ADDR
);
let
llextra
=
cx
.extract_value
(
v
,
abi
::
FAT_PTR_EXTRA
);
store_fat_ptr
(
cx
,
lladdr
,
llextra
,
dst
,
t
);
}
else
{
Store
(
cx
,
from_immediate
(
cx
,
v
),
dst
);
cx
.store
(
from_immediate
(
cx
,
v
),
dst
);
}
}
...
...
@@ -575,8 +577,8 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
dst
:
ValueRef
,
_
ty
:
Ty
<
'tcx
>
)
{
// FIXME: emit metadata
Store
(
cx
,
data
,
get_dataptr
(
cx
,
dst
));
Store
(
cx
,
extra
,
get_meta
(
cx
,
dst
));
cx
.store
(
data
,
get_dataptr
(
cx
,
dst
));
cx
.store
(
extra
,
get_meta
(
cx
,
dst
));
}
pub
fn
load_fat_ptr
<
'blk
,
'tcx
>
(
cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
...
...
@@ -609,7 +611,7 @@ pub fn load_fat_ptr_builder<'a, 'tcx>(
pub
fn
from_immediate
(
bcx
:
&
BlockAndBuilder
,
val
:
ValueRef
)
->
ValueRef
{
if
val_ty
(
val
)
==
Type
::
i1
(
bcx
.ccx
())
{
ZExt
(
bcx
,
val
,
Type
::
i8
(
bcx
.ccx
()))
bcx
.zext
(
val
,
Type
::
i8
(
bcx
.ccx
()))
}
else
{
val
}
...
...
@@ -617,7 +619,7 @@ pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
pub
fn
to_immediate
(
bcx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
ty
:
Ty
)
->
ValueRef
{
if
ty
.is_bool
()
{
Trunc
(
bcx
,
val
,
Type
::
i1
(
bcx
.ccx
()))
bcx
.trunc
(
val
,
Type
::
i1
(
bcx
.ccx
()))
}
else
{
val
}
...
...
@@ -637,9 +639,9 @@ pub fn with_cond<'blk, 'tcx, F>(
let
fcx
=
bcx
.fcx
();
let
next_cx
=
fcx
.new_block
(
"next"
)
.build
();
let
cond_cx
=
fcx
.new_block
(
"cond"
)
.build
();
CondBr
(
&
bcx
,
val
,
cond_cx
.llbb
(),
next_cx
.llbb
(),
DebugLoc
::
None
);
bcx
.cond_br
(
val
,
cond_cx
.llbb
(),
next_cx
.llbb
()
);
let
after_cx
=
f
(
cond_cx
);
Br
(
&
after_cx
,
next_cx
.llbb
(),
DebugLoc
::
None
);
after_cx
.br
(
next_cx
.llbb
()
);
next_cx
}
...
...
@@ -702,8 +704,9 @@ pub fn trans_unwind_resume(bcx: &BlockAndBuilder, lpval: ValueRef) {
if
!
bcx
.sess
()
.target.target.options.custom_unwind_resume
{
bcx
.resume
(
lpval
);
}
else
{
let
exc_ptr
=
ExtractValue
(
bcx
,
lpval
,
0
);
Call
(
bcx
,
bcx
.fcx
()
.eh_unwind_resume
()
.reify
(
bcx
.ccx
()),
&
[
exc_ptr
],
DebugLoc
::
None
);
let
exc_ptr
=
bcx
.extract_value
(
lpval
,
0
);
bcx
.call
(
bcx
.fcx
()
.eh_unwind_resume
()
.reify
(
bcx
.ccx
()),
&
[
exc_ptr
],
bcx
.lpad
()
.and_then
(|
b
|
b
.bundle
()));
}
}
...
...
@@ -725,7 +728,9 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
b
.call
(
memcpy
,
&
[
dst_ptr
,
src_ptr
,
size
,
align
,
volatile
],
None
);
}
pub
fn
memcpy_ty
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
dst
:
ValueRef
,
src
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
pub
fn
memcpy_ty
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
dst
:
ValueRef
,
src
:
ValueRef
,
t
:
Ty
<
'tcx
>
)
{
let
_
icx
=
push_ctxt
(
"memcpy_ty"
);
let
ccx
=
bcx
.ccx
();
...
...
@@ -792,7 +797,7 @@ pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
pub
fn
alloca
(
cx
:
&
BlockAndBuilder
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
let
_
icx
=
push_ctxt
(
"alloca"
);
DebugLoc
::
None
.apply
(
cx
.fcx
());
Alloca
(
cx
,
ty
,
name
)
cx
.fcx
()
.alloca
(
ty
,
name
)
}
impl
<
'blk
,
'tcx
>
FunctionContext
<
'blk
,
'tcx
>
{
...
...
@@ -863,7 +868,7 @@ pub fn init(&'blk self, skip_retptr: bool) -> BlockAndBuilder<'blk, 'tcx> {
// Use a dummy instruction as the insertion point for all allocas.
// This is later removed in FunctionContext::cleanup.
self
.alloca_insert_pt
.set
(
Some
(
unsafe
{
Load
(
&
entry_bcx
,
C_null
(
Type
::
i8p
(
self
.ccx
)));
entry_bcx
.load
(
C_null
(
Type
::
i8p
(
self
.ccx
)));
llvm
::
LLVMGetFirstInstruction
(
entry_bcx
.llbb
())
}));
...
...
@@ -881,7 +886,7 @@ pub fn init(&'blk self, skip_retptr: bool) -> BlockAndBuilder<'blk, 'tcx> {
let
slot
=
if
self
.fn_ty.ret
.is_indirect
()
{
get_param
(
self
.llfn
,
0
)
}
else
{
AllocaFcx
(
self
,
llty
,
"sret_slot"
)
self
.alloca
(
llty
,
"sret_slot"
)
};
self
.llretslotptr
.set
(
Some
(
slot
));
...
...
@@ -892,21 +897,19 @@ pub fn init(&'blk self, skip_retptr: bool) -> BlockAndBuilder<'blk, 'tcx> {
/// Ties up the llstaticallocas -> llloadenv -> lltop edges,
/// and builds the return block.
pub
fn
finish
(
&
'blk
self
,
ret_cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ret_debug_loc
:
DebugLoc
)
{
pub
fn
finish
(
&
'blk
self
,
ret_cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
)
{
let
_
icx
=
push_ctxt
(
"FunctionContext::finish"
);
self
.build_return_block
(
ret_cx
,
ret_debug_loc
);
self
.build_return_block
(
ret_cx
);
DebugLoc
::
None
.apply
(
self
);
self
.cleanup
();
}
// Builds the return block for a function.
pub
fn
build_return_block
(
&
self
,
ret_cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
ret_debug_location
:
DebugLoc
)
{
pub
fn
build_return_block
(
&
self
,
ret_cx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
)
{
if
self
.llretslotptr
.get
()
.is_none
()
||
self
.fn_ty.ret
.is_indirect
()
{
return
RetVoid
(
ret_cx
,
ret_debug_location
);
return
ret_cx
.ret_void
(
);
}
let
retslot
=
self
.llretslotptr
.get
()
.unwrap
();
...
...
@@ -925,13 +928,13 @@ pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>,
}
if
self
.fn_ty.ret
.is_indirect
()
{
Store
(
ret_cx
,
retval
,
get_param
(
self
.llfn
,
0
));
RetVoid
(
ret_cx
,
ret_debug_location
)
ret_cx
.store
(
retval
,
get_param
(
self
.llfn
,
0
));
ret_cx
.ret_void
(
)
}
else
{
if
llty
==
Type
::
i1
(
self
.ccx
)
{
retval
=
Trunc
(
ret_cx
,
retval
,
llty
);
retval
=
ret_cx
.trunc
(
retval
,
llty
);
}
Ret
(
ret_cx
,
retval
,
ret_debug_location
)
ret_cx
.ret
(
retval
)
}
}
(
_
,
cast_ty
)
if
self
.fn_ty.ret
.is_indirect
()
=>
{
...
...
@@ -941,24 +944,24 @@ pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>,
let
llalign
=
llalign_of_min
(
self
.ccx
,
self
.fn_ty.ret.ty
);
call_memcpy
(
&
ret_cx
,
get_param
(
self
.llfn
,
0
),
retslot
,
llsz
,
llalign
as
u32
);
RetVoid
(
ret_cx
,
ret_debug_location
)
ret_cx
.ret_void
(
)
}
(
_
,
Some
(
cast_ty
))
=>
{
let
load
=
Load
(
ret_cx
,
PointerCast
(
ret_cx
,
retslot
,
cast_ty
.ptr_to
()));
let
load
=
ret_cx
.load
(
ret_cx
.pointercast
(
retslot
,
cast_ty
.ptr_to
()));
let
llalign
=
llalign_of_min
(
self
.ccx
,
self
.fn_ty.ret.ty
);
unsafe
{
llvm
::
LLVMSetAlignment
(
load
,
llalign
);
}
Ret
(
ret_cx
,
load
,
ret_debug_location
)
ret_cx
.ret
(
load
)
}
(
_
,
None
)
=>
{
let
retval
=
if
llty
==
Type
::
i1
(
self
.ccx
)
{
let
val
=
LoadRangeAssert
(
ret_cx
,
retslot
,
0
,
2
,
llvm
::
False
);
Trunc
(
ret_cx
,
val
,
llty
)
let
val
=
ret_cx
.load_range_assert
(
retslot
,
0
,
2
,
llvm
::
False
);
ret_cx
.trunc
(
val
,
llty
)
}
else
{
Load
(
ret_cx
,
retslot
)
ret_cx
.load
(
retslot
)
};
Ret
(
ret_cx
,
retval
,
ret_debug_location
)
ret_cx
.ret
(
retval
)
}
}
}
...
...
@@ -1056,7 +1059,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
adt
::
trans_set_discr
(
&
bcx
,
sig
.output
(),
dest
,
disr
);
}
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
);
}
pub
fn
llvm_linkage_by_name
(
name
:
&
str
)
->
Option
<
Linkage
>
{
...
...
src/librustc_trans/build.rs
已删除
100644 → 0
浏览文件 @
3f17ab96
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(dead_code)]
// FFI wrappers
#![allow(non_snake_case)]
use
llvm
;
use
llvm
::{
AtomicRmwBinOp
,
AtomicOrdering
,
SynchronizationScope
,
AsmDialect
};
use
llvm
::{
Opcode
,
IntPredicate
,
RealPredicate
};
use
llvm
::{
ValueRef
,
BasicBlockRef
};
use
common
::
*
;
use
syntax_pos
::
Span
;
use
type_
::
Type
;
use
value
::
Value
;
use
debuginfo
::
DebugLoc
;
use
libc
::{
c_uint
,
c_char
};
pub
fn
RetVoid
(
cx
:
&
BlockAndBuilder
,
debug_loc
:
DebugLoc
)
{
debug_loc
.apply
(
cx
.fcx
());
cx
.ret_void
();
}
pub
fn
Ret
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
{
debug_loc
.apply
(
cx
.fcx
());
cx
.ret
(
v
);
}
pub
fn
AggregateRet
(
cx
:
&
BlockAndBuilder
,
ret_vals
:
&
[
ValueRef
],
debug_loc
:
DebugLoc
)
{
debug_loc
.apply
(
cx
.fcx
());
cx
.aggregate_ret
(
ret_vals
);
}
pub
fn
Br
(
cx
:
&
BlockAndBuilder
,
dest
:
BasicBlockRef
,
debug_loc
:
DebugLoc
)
{
debug_loc
.apply
(
cx
.fcx
());
cx
.br
(
dest
);
}
pub
fn
CondBr
(
cx
:
&
BlockAndBuilder
,
if_
:
ValueRef
,
then
:
BasicBlockRef
,
else_
:
BasicBlockRef
,
debug_loc
:
DebugLoc
)
{
debug_loc
.apply
(
cx
.fcx
());
cx
.cond_br
(
if_
,
then
,
else_
);
}
pub
fn
Switch
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
else_
:
BasicBlockRef
,
num_cases
:
usize
)
->
ValueRef
{
cx
.switch
(
v
,
else_
,
num_cases
)
}
pub
fn
AddCase
(
s
:
ValueRef
,
on_val
:
ValueRef
,
dest
:
BasicBlockRef
)
{
unsafe
{
if
llvm
::
LLVMIsUndef
(
s
)
==
llvm
::
True
{
return
;
}
llvm
::
LLVMAddCase
(
s
,
on_val
,
dest
);
}
}
pub
fn
IndirectBr
(
cx
:
&
BlockAndBuilder
,
addr
:
ValueRef
,
num_dests
:
usize
,
debug_loc
:
DebugLoc
)
{
debug_loc
.apply
(
cx
.fcx
());
cx
.indirect_br
(
addr
,
num_dests
);
}
pub
fn
Invoke
(
cx
:
&
BlockAndBuilder
,
fn_
:
ValueRef
,
args
:
&
[
ValueRef
],
then
:
BasicBlockRef
,
catch
:
BasicBlockRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug!
(
"Invoke({:?} with arguments ({}))"
,
Value
(
fn_
),
args
.iter
()
.map
(|
a
|
{
format!
(
"{:?}"
,
Value
(
*
a
))
})
.collect
::
<
Vec
<
String
>>
()
.join
(
", "
));
debug_loc
.apply
(
cx
.fcx
());
let
bundle
=
cx
.lpad
()
.and_then
(|
b
|
b
.bundle
());
cx
.invoke
(
fn_
,
args
,
then
,
catch
,
bundle
)
}
/* Arithmetic */
pub
fn
Add
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.add
(
lhs
,
rhs
)
}
pub
fn
NSWAdd
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswadd
(
lhs
,
rhs
)
}
pub
fn
NUWAdd
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwadd
(
lhs
,
rhs
)
}
pub
fn
FAdd
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fadd
(
lhs
,
rhs
)
}
pub
fn
FAddFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fadd_fast
(
lhs
,
rhs
)
}
pub
fn
Sub
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.sub
(
lhs
,
rhs
)
}
pub
fn
NSWSub
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswsub
(
lhs
,
rhs
)
}
pub
fn
NUWSub
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwsub
(
lhs
,
rhs
)
}
pub
fn
FSub
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fsub
(
lhs
,
rhs
)
}
pub
fn
FSubFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fsub_fast
(
lhs
,
rhs
)
}
pub
fn
Mul
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.mul
(
lhs
,
rhs
)
}
pub
fn
NSWMul
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswmul
(
lhs
,
rhs
)
}
pub
fn
NUWMul
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwmul
(
lhs
,
rhs
)
}
pub
fn
FMul
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fmul
(
lhs
,
rhs
)
}
pub
fn
FMulFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fmul_fast
(
lhs
,
rhs
)
}
pub
fn
UDiv
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.udiv
(
lhs
,
rhs
)
}
pub
fn
SDiv
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.sdiv
(
lhs
,
rhs
)
}
pub
fn
ExactSDiv
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.exactsdiv
(
lhs
,
rhs
)
}
pub
fn
FDiv
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fdiv
(
lhs
,
rhs
)
}
pub
fn
FDivFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fdiv_fast
(
lhs
,
rhs
)
}
pub
fn
URem
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.urem
(
lhs
,
rhs
)
}
pub
fn
SRem
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.srem
(
lhs
,
rhs
)
}
pub
fn
FRem
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.frem
(
lhs
,
rhs
)
}
pub
fn
FRemFast
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.frem_fast
(
lhs
,
rhs
)
}
pub
fn
Shl
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.shl
(
lhs
,
rhs
)
}
pub
fn
LShr
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.lshr
(
lhs
,
rhs
)
}
pub
fn
AShr
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.ashr
(
lhs
,
rhs
)
}
pub
fn
And
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.and
(
lhs
,
rhs
)
}
pub
fn
Or
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.or
(
lhs
,
rhs
)
}
pub
fn
Xor
(
cx
:
&
BlockAndBuilder
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.xor
(
lhs
,
rhs
)
}
pub
fn
BinOp
(
cx
:
&
BlockAndBuilder
,
op
:
Opcode
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.binop
(
op
,
lhs
,
rhs
)
}
pub
fn
Neg
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.neg
(
v
)
}
pub
fn
NSWNeg
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nswneg
(
v
)
}
pub
fn
NUWNeg
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.nuwneg
(
v
)
}
pub
fn
FNeg
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.fneg
(
v
)
}
pub
fn
Not
(
cx
:
&
BlockAndBuilder
,
v
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
debug_loc
.apply
(
cx
.fcx
());
cx
.not
(
v
)
}
pub
fn
Alloca
(
cx
:
&
BlockAndBuilder
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
AllocaFcx
(
cx
.fcx
(),
ty
,
name
)
}
pub
fn
AllocaFcx
(
fcx
:
&
FunctionContext
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
let
b
=
fcx
.ccx
.builder
();
b
.position_before
(
fcx
.alloca_insert_pt
.get
()
.unwrap
());
DebugLoc
::
None
.apply
(
fcx
);
b
.alloca
(
ty
,
name
)
}
pub
fn
Free
(
cx
:
&
BlockAndBuilder
,
pointer_val
:
ValueRef
)
{
cx
.free
(
pointer_val
)
}
pub
fn
Load
(
cx
:
&
BlockAndBuilder
,
pointer_val
:
ValueRef
)
->
ValueRef
{
cx
.load
(
pointer_val
)
}
pub
fn
VolatileLoad
(
cx
:
&
BlockAndBuilder
,
pointer_val
:
ValueRef
)
->
ValueRef
{
cx
.volatile_load
(
pointer_val
)
}
pub
fn
AtomicLoad
(
cx
:
&
BlockAndBuilder
,
pointer_val
:
ValueRef
,
order
:
AtomicOrdering
)
->
ValueRef
{
cx
.atomic_load
(
pointer_val
,
order
)
}
pub
fn
LoadRangeAssert
(
cx
:
&
BlockAndBuilder
,
pointer_val
:
ValueRef
,
lo
:
u64
,
hi
:
u64
,
signed
:
llvm
::
Bool
)
->
ValueRef
{
cx
.load_range_assert
(
pointer_val
,
lo
,
hi
,
signed
)
}
pub
fn
LoadNonNull
(
cx
:
&
BlockAndBuilder
,
ptr
:
ValueRef
)
->
ValueRef
{
cx
.load_nonnull
(
ptr
)
}
pub
fn
Store
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
ptr
:
ValueRef
)
->
ValueRef
{
cx
.store
(
val
,
ptr
)
}
pub
fn
VolatileStore
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
ptr
:
ValueRef
)
->
ValueRef
{
cx
.volatile_store
(
val
,
ptr
)
}
pub
fn
AtomicStore
(
cx
:
&
BlockAndBuilder
,
val
:
ValueRef
,
ptr
:
ValueRef
,
order
:
AtomicOrdering
)
{
cx
.atomic_store
(
val
,
ptr
,
order
)
}
/// Emits a `getelementptr` from `base` using the given index values.
pub fn GEP(bcx: &BlockAndBuilder, base: ValueRef, indices: &[ValueRef]) -> ValueRef {
    bcx.gep(base, indices)
}

/// Convenience form of `GEP` taking plain integer indices; the builder's
/// `gepi` wraps them into constants (per the original `C_i32` comment).
#[inline]
pub fn GEPi(bcx: &BlockAndBuilder, base: ValueRef, indices: &[usize]) -> ValueRef {
    bcx.gepi(base, indices)
}

/// Emits an inbounds `getelementptr` from `base` using the given index
/// values.
pub fn InBoundsGEP(bcx: &BlockAndBuilder, base: ValueRef, indices: &[ValueRef]) -> ValueRef {
    bcx.inbounds_gep(base, indices)
}

/// Emits a struct-field GEP selecting field `field_idx` of `base`.
pub fn StructGEP(bcx: &BlockAndBuilder, base: ValueRef, field_idx: usize) -> ValueRef {
    bcx.struct_gep(base, field_idx)
}
pub
fn
GlobalString
(
cx
:
&
BlockAndBuilder
,
_
str
:
*
const
c_char
)
->
ValueRef
{
cx
.global_string
(
_
str
)
}
pub
fn
GlobalStringPtr
(
cx
:
&
BlockAndBuilder
,
_
str
:
*
const
c_char
)
->
ValueRef
{
cx
.global_string_ptr
(
_
str
)
}
// ---------------------------------------------------------------------
// Casts: each function below is a thin wrapper forwarding to the
// corresponding cast method on the block's builder.
// ---------------------------------------------------------------------

/// Integer truncation of `value` to `dest_ty`.
pub fn Trunc(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.trunc(value, dest_ty)
}

/// Zero-extension of `value` to `dest_ty`.
pub fn ZExt(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.zext(value, dest_ty)
}

/// Sign-extension of `value` to `dest_ty`.
pub fn SExt(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.sext(value, dest_ty)
}

/// Float-to-unsigned-integer conversion of `value` to `dest_ty`.
pub fn FPToUI(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.fptoui(value, dest_ty)
}

/// Float-to-signed-integer conversion of `value` to `dest_ty`.
pub fn FPToSI(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.fptosi(value, dest_ty)
}

/// Unsigned-integer-to-float conversion of `value` to `dest_ty`.
pub fn UIToFP(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.uitofp(value, dest_ty)
}

/// Signed-integer-to-float conversion of `value` to `dest_ty`.
pub fn SIToFP(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.sitofp(value, dest_ty)
}

/// Floating-point truncation of `value` to the narrower `dest_ty`.
pub fn FPTrunc(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.fptrunc(value, dest_ty)
}

/// Floating-point extension of `value` to the wider `dest_ty`.
pub fn FPExt(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.fpext(value, dest_ty)
}

/// Pointer-to-integer conversion of `value` to `dest_ty`.
pub fn PtrToInt(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.ptrtoint(value, dest_ty)
}

/// Integer-to-pointer conversion of `value` to `dest_ty`.
pub fn IntToPtr(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.inttoptr(value, dest_ty)
}

/// Bit-pattern-preserving cast of `value` to `dest_ty`.
pub fn BitCast(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.bitcast(value, dest_ty)
}

/// Zero-extension when widening, plain bitcast otherwise.
pub fn ZExtOrBitCast(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.zext_or_bitcast(value, dest_ty)
}

/// Sign-extension when widening, plain bitcast otherwise.
pub fn SExtOrBitCast(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.sext_or_bitcast(value, dest_ty)
}

/// Truncation when narrowing, plain bitcast otherwise.
pub fn TruncOrBitCast(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.trunc_or_bitcast(value, dest_ty)
}

/// Generic cast of `value` to `dest_ty` using opcode `op`; the trailing
/// `*const u8` argument is unused and kept only for signature
/// compatibility with existing callers.
pub fn Cast(bcx: &BlockAndBuilder, op: Opcode, value: ValueRef, dest_ty: Type,
            _: *const u8) -> ValueRef {
    bcx.cast(op, value, dest_ty)
}

/// Pointer cast of `value` to the pointer type `dest_ty`.
pub fn PointerCast(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.pointercast(value, dest_ty)
}

/// Integer cast of `value` to `dest_ty` (extends or truncates as needed).
pub fn IntCast(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.intcast(value, dest_ty)
}

/// Floating-point cast of `value` to `dest_ty` (extends or truncates as
/// needed).
pub fn FPCast(bcx: &BlockAndBuilder, value: ValueRef, dest_ty: Type) -> ValueRef {
    bcx.fpcast(value, dest_ty)
}
// ---------------------------------------------------------------------
// Comparisons
// ---------------------------------------------------------------------

/// Emits an integer comparison of `lhs` and `rhs` under predicate `op`,
/// applying `debug_loc` to the function context first.
pub fn ICmp(bcx: &BlockAndBuilder, op: IntPredicate, lhs: ValueRef, rhs: ValueRef,
            debug_loc: DebugLoc) -> ValueRef {
    debug_loc.apply(bcx.fcx());
    bcx.icmp(op, lhs, rhs)
}

/// Emits a floating-point comparison of `lhs` and `rhs` under predicate
/// `op`, applying `debug_loc` to the function context first.
pub fn FCmp(bcx: &BlockAndBuilder, op: RealPredicate, lhs: ValueRef, rhs: ValueRef,
            debug_loc: DebugLoc) -> ValueRef {
    debug_loc.apply(bcx.fcx());
    bcx.fcmp(op, lhs, rhs)
}
// ---------------------------------------------------------------------
// Miscellaneous instructions
// ---------------------------------------------------------------------

/// Emits a `phi` node of type `ty` with no incoming edges yet.
pub fn EmptyPhi(bcx: &BlockAndBuilder, ty: Type) -> ValueRef {
    bcx.empty_phi(ty)
}

/// Emits a `phi` node of type `ty` whose incoming values are `vals`,
/// paired positionally with the predecessor blocks in `bbs`.
pub fn Phi(bcx: &BlockAndBuilder, ty: Type, vals: &[ValueRef],
           bbs: &[BasicBlockRef]) -> ValueRef {
    bcx.phi(ty, vals, bbs)
}
/// Registers (`val`, `bb`) as an incoming edge on the `phi` instruction.
/// Silently does nothing when `phi` is `undef` — presumably a sentinel for
/// a phi that was never materialized; TODO confirm against callers.
pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
    // SAFETY-relevant: raw FFI into the LLVM C API; `phi`, `val` and `bb`
    // must be valid LLVM references (assumed upheld by callers).
    unsafe {
        if llvm::LLVMIsUndef(phi) == llvm::True {
            return;
        }
        // Exactly one (value, block) pair is appended per call.
        llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
    }
}
/// Attaches `text` as a comment associated with the source span `sp`
/// (delegates to the builder).
pub fn add_span_comment(bcx: &BlockAndBuilder, sp: Span, text: &str) {
    bcx.add_span_comment(sp, text)
}

/// Attaches `text` as a plain comment at the current build position
/// (delegates to the builder).
pub fn add_comment(bcx: &BlockAndBuilder, text: &str) {
    bcx.add_comment(text)
}
/// Emits an inline-assembly call.
///
/// * `asm` / `cons` — NUL-terminated assembly template and constraint
///   string.
/// * `inputs` — operand values fed to the asm block.
/// * `output` — the LLVM type of the produced value.
/// * `volatile` / `alignstack` — the usual inline-asm flags.
/// * `dia` — assembler dialect (AT&T vs Intel).
pub fn InlineAsmCall(bcx: &BlockAndBuilder,
                     asm: *const c_char,
                     cons: *const c_char,
                     inputs: &[ValueRef],
                     output: Type,
                     volatile: bool,
                     alignstack: bool,
                     dia: AsmDialect) -> ValueRef {
    bcx.inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia)
}
/// Emits a call to `fn_` with `args`.
///
/// Applies `debug_loc` to the function context first, then forwards the
/// current landing pad's operand bundle (if one exists) to the builder's
/// `call`.
pub fn Call(bcx: &BlockAndBuilder, fn_: ValueRef, args: &[ValueRef],
            debug_loc: DebugLoc) -> ValueRef {
    debug_loc.apply(bcx.fcx());
    let bundle = bcx.lpad().and_then(|lp| lp.bundle());
    bcx.call(fn_, args, bundle)
}
/// Emits an atomic fence with the given memory `order` and synchronization
/// `scope`.
pub fn AtomicFence(bcx: &BlockAndBuilder, order: AtomicOrdering, scope: SynchronizationScope) {
    bcx.atomic_fence(order, scope)
}

/// Emits a `select` choosing `then` when `if_` is true and `else_`
/// otherwise.
pub fn Select(bcx: &BlockAndBuilder, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef {
    bcx.select(if_, then, else_)
}

/// Emits a `va_arg` fetching the next variadic argument of type `ty` from
/// the va_list `list`.
pub fn VAArg(bcx: &BlockAndBuilder, list: ValueRef, ty: Type) -> ValueRef {
    bcx.va_arg(list, ty)
}
/// Extracts the element at `idx` from the vector value `vec`.
pub fn ExtractElement(bcx: &BlockAndBuilder, vec: ValueRef, idx: ValueRef) -> ValueRef {
    bcx.extract_element(vec, idx)
}

/// Inserts `elt` into the vector value `vec` at position `idx`.
pub fn InsertElement(bcx: &BlockAndBuilder, vec: ValueRef, elt: ValueRef,
                     idx: ValueRef) -> ValueRef {
    bcx.insert_element(vec, elt, idx)
}

/// Emits a `shufflevector` combining `v1` and `v2` according to `mask`.
pub fn ShuffleVector(bcx: &BlockAndBuilder, v1: ValueRef, v2: ValueRef,
                     mask: ValueRef) -> ValueRef {
    bcx.shuffle_vector(v1, v2, mask)
}

/// Builds a vector of `num_elts` lanes, each holding `elt`.
pub fn VectorSplat(bcx: &BlockAndBuilder, num_elts: usize, elt: ValueRef) -> ValueRef {
    bcx.vector_splat(num_elts, elt)
}
/// Extracts the field at `idx` from the aggregate value `agg`.
pub fn ExtractValue(bcx: &BlockAndBuilder, agg: ValueRef, idx: usize) -> ValueRef {
    bcx.extract_value(agg, idx)
}

/// Inserts `elt` into the aggregate value `agg` at field position `idx`.
pub fn InsertValue(bcx: &BlockAndBuilder, agg: ValueRef, elt: ValueRef,
                   idx: usize) -> ValueRef {
    bcx.insert_value(agg, elt, idx)
}
/// Emits a test for `ptr` being the null pointer.
pub fn IsNull(bcx: &BlockAndBuilder, ptr: ValueRef) -> ValueRef {
    bcx.is_null(ptr)
}

/// Emits a test for `ptr` being anything other than the null pointer.
pub fn IsNotNull(bcx: &BlockAndBuilder, ptr: ValueRef) -> ValueRef {
    bcx.is_not_null(ptr)
}

/// Emits the element-count difference between the pointers `lhs` and
/// `rhs` (delegates to the builder's `ptrdiff`).
pub fn PtrDiff(bcx: &BlockAndBuilder, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
    bcx.ptrdiff(lhs, rhs)
}

/// Emits a trap instruction at the current position.
pub fn Trap(bcx: &BlockAndBuilder) {
    bcx.trap();
}
/// Emits a `landingpad` of type `ty` with personality `pers_fn`, reserving
/// room for `num_clauses` clauses; the enclosing function's `llfn` is
/// passed through to the builder.
pub fn LandingPad(bcx: &BlockAndBuilder, ty: Type, pers_fn: ValueRef,
                  num_clauses: usize) -> ValueRef {
    bcx.landing_pad(ty, pers_fn, num_clauses, bcx.fcx().llfn)
}

/// Appends `clause` to an existing `landing_pad` instruction.
pub fn AddClause(bcx: &BlockAndBuilder, landing_pad: ValueRef, clause: ValueRef) {
    bcx.add_clause(landing_pad, clause)
}

/// Marks `landing_pad` as a cleanup pad.
pub fn SetCleanup(bcx: &BlockAndBuilder, landing_pad: ValueRef) {
    bcx.set_cleanup(landing_pad)
}

/// Sets `f` as the personality function of the current function.
pub fn SetPersonalityFn(bcx: &BlockAndBuilder, f: ValueRef) {
    bcx.set_personality_fn(f)
}
// ---------------------------------------------------------------------
// Atomic operations
// ---------------------------------------------------------------------

/// Emits an atomic compare-and-exchange on `dst`: replaces the stored
/// value with `src` when it equals `cmp`, using `order` on success and
/// `failure_order` on failure; `weak` selects the weak variant.
pub fn AtomicCmpXchg(bcx: &BlockAndBuilder,
                     dst: ValueRef,
                     cmp: ValueRef,
                     src: ValueRef,
                     order: AtomicOrdering,
                     failure_order: AtomicOrdering,
                     weak: llvm::Bool) -> ValueRef {
    bcx.atomic_cmpxchg(dst, cmp, src, order, failure_order, weak)
}

/// Emits an atomic read-modify-write of `dst` with operand `src`, using
/// binary operation `op` and memory `order`.
pub fn AtomicRMW(bcx: &BlockAndBuilder, op: AtomicRmwBinOp, dst: ValueRef,
                 src: ValueRef, order: AtomicOrdering) -> ValueRef {
    bcx.atomic_rmw(op, dst, src, order)
}
/// Emits a `cleanuppad` with optional `parent` funclet and pad `args`.
pub fn CleanupPad(bcx: &BlockAndBuilder, parent: Option<ValueRef>,
                  args: &[ValueRef]) -> ValueRef {
    bcx.cleanup_pad(parent, args)
}

/// Emits a `cleanupret` from the pad `cleanup`, continuing unwinding at
/// `unwind` when given, or to the caller otherwise.
pub fn CleanupRet(bcx: &BlockAndBuilder, cleanup: ValueRef,
                  unwind: Option<BasicBlockRef>) -> ValueRef {
    bcx.cleanup_ret(cleanup, unwind)
}

/// Emits a `catchpad` under the `parent` catchswitch with pad `args`.
pub fn CatchPad(bcx: &BlockAndBuilder, parent: ValueRef, args: &[ValueRef]) -> ValueRef {
    bcx.catch_pad(parent, args)
}

/// Emits a `catchret` from `pad`, resuming normal execution at `unwind`.
pub fn CatchRet(bcx: &BlockAndBuilder, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef {
    bcx.catch_ret(pad, unwind)
}

/// Emits a `catchswitch` with optional `parent` funclet and optional
/// `unwind` destination, reserving room for `num_handlers` handlers.
pub fn CatchSwitch(bcx: &BlockAndBuilder, parent: Option<ValueRef>,
                   unwind: Option<BasicBlockRef>, num_handlers: usize) -> ValueRef {
    bcx.catch_switch(parent, unwind, num_handlers)
}

/// Registers `handler` as a handler block on the `catch_switch`
/// instruction.
pub fn AddHandler(bcx: &BlockAndBuilder, catch_switch: ValueRef, handler: BasicBlockRef) {
    bcx.add_handler(catch_switch, handler)
}
src/librustc_trans/builder.rs
浏览文件 @
59ef51c1
...
...
@@ -1103,6 +1103,20 @@ pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) {
}
}
/// Adds the (`on_val` -> `dest`) arm to the `switch` instruction `s`.
/// Silently does nothing when `s` is `undef` — presumably a sentinel for a
/// switch that was never emitted; TODO confirm against callers.
pub fn add_case(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
    // SAFETY-relevant: raw FFI into the LLVM C API; `s`, `on_val` and
    // `dest` must be valid LLVM references (assumed upheld by callers).
    unsafe {
        if llvm::LLVMIsUndef(s) == llvm::True {
            return;
        }
        llvm::LLVMAddCase(s, on_val, dest)
    }
}
/// Registers (`val`, `bb`) as an incoming edge on the `phi` instruction.
/// Silently does nothing when `phi` is `undef` — presumably a sentinel for
/// a phi that was never materialized; TODO confirm against callers.
pub fn add_incoming_to_phi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
    // SAFETY-relevant: raw FFI into the LLVM C API; `phi`, `val` and `bb`
    // must be valid LLVM references (assumed upheld by callers).
    unsafe {
        if llvm::LLVMIsUndef(phi) == llvm::True {
            return;
        }
        // Exactly one (value, block) pair is appended per call.
        llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
    }
}
/// Returns the ptr value that should be used for storing `val`.
fn
check_store
<
'b
>
(
&
self
,
val
:
ValueRef
,
...
...
src/librustc_trans/callee.rs
浏览文件 @
59ef51c1
...
...
@@ -25,12 +25,10 @@
use
attributes
;
use
base
;
use
base
::
*
;
use
build
::
*
;
use
common
::{
self
,
Block
,
BlockAndBuilder
,
CrateContext
,
FunctionContext
,
SharedCrateContext
};
use
consts
;
use
debuginfo
::
DebugLoc
;
use
declare
;
use
value
::
Value
;
use
meth
;
...
...
@@ -210,11 +208,10 @@ pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
/// into memory somewhere. Nonetheless we return the actual return value of the
/// function.
pub
fn
call
<
'a
,
'blk
>
(
self
,
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
debug_loc
:
DebugLoc
,
args
:
&
[
ValueRef
],
dest
:
Option
<
ValueRef
>
)
->
(
BlockAndBuilder
<
'blk
,
'tcx
>
,
ValueRef
)
{
trans_call_inner
(
bcx
,
debug_loc
,
self
,
args
,
dest
)
trans_call_inner
(
bcx
,
self
,
args
,
dest
)
}
/// Turn the callee into a function pointer.
...
...
@@ -414,11 +411,11 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let
self_scope
=
fcx
.push_custom_cleanup_scope
();
fcx
.schedule_drop_mem
(
self_scope
,
llenv
,
closure_ty
);
let
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[
self_idx
..
],
dest
)
.0
;
let
bcx
=
callee
.call
(
bcx
,
&
llargs
[
self_idx
..
],
dest
)
.0
;
let
bcx
=
fcx
.pop_and_trans_custom_cleanup_scope
(
bcx
,
self_scope
);
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
);
ccx
.instances
()
.borrow_mut
()
.insert
(
method_instance
,
lloncefn
);
...
...
@@ -531,7 +528,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
let
llfnpointer
=
llfnpointer
.unwrap_or_else
(||
{
// the first argument (`self`) will be ptr to the fn pointer
if
is_by_ref
{
Load
(
&
bcx
,
llargs
[
self_idx
])
bcx
.load
(
llargs
[
self_idx
])
}
else
{
llargs
[
self_idx
]
}
...
...
@@ -543,8 +540,8 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
data
:
Fn
(
llfnpointer
),
ty
:
bare_fn_ty
};
let
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[(
self_idx
+
1
)
..
],
dest
)
.0
;
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
let
bcx
=
callee
.call
(
bcx
,
&
llargs
[(
self_idx
+
1
)
..
],
dest
)
.0
;
fcx
.finish
(
&
bcx
);
ccx
.fn_pointer_shims
()
.borrow_mut
()
.insert
(
bare_fn_ty_maybe_ref
,
llfn
);
...
...
@@ -654,7 +651,6 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// Translating calls
fn
trans_call_inner
<
'a
,
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
debug_loc
:
DebugLoc
,
callee
:
Callee
<
'tcx
>
,
args
:
&
[
ValueRef
],
opt_llretslot
:
Option
<
ValueRef
>
)
...
...
@@ -689,7 +685,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
if
fn_ty
.ret
.is_indirect
()
{
let
mut
llretslot
=
opt_llretslot
.unwrap
();
if
let
Some
(
ty
)
=
fn_ty
.ret.cast
{
llretslot
=
PointerCast
(
&
bcx
,
llretslot
,
ty
.ptr_to
());
llretslot
=
bcx
.pointercast
(
llretslot
,
ty
.ptr_to
());
}
llargs
.push
(
llretslot
);
}
...
...
@@ -700,7 +696,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
let
fn_ptr
=
meth
::
get_virtual_method
(
&
bcx
,
args
[
1
],
idx
);
let
llty
=
fn_ty
.llvm_type
(
&
bcx
.ccx
())
.ptr_to
();
callee
=
Fn
(
PointerCast
(
&
bcx
,
fn_ptr
,
llty
));
callee
=
Fn
(
bcx
.pointercast
(
fn_ptr
,
llty
));
llargs
.extend_from_slice
(
&
args
[
2
..
]);
}
_
=>
llargs
.extend_from_slice
(
args
)
...
...
@@ -711,7 +707,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
_
=>
bug!
(
"expected fn pointer callee, found {:?}"
,
callee
)
};
let
(
llret
,
bcx
)
=
base
::
invoke
(
bcx
,
llfn
,
&
llargs
,
debug_loc
);
let
(
llret
,
bcx
)
=
base
::
invoke
(
bcx
,
llfn
,
&
llargs
);
fn_ty
.apply_attrs_callsite
(
llret
);
// If the function we just called does not use an outpointer,
...
...
src/librustc_trans/cleanup.rs
浏览文件 @
59ef51c1
...
...
@@ -118,7 +118,6 @@
use
llvm
::{
BasicBlockRef
,
ValueRef
};
use
base
;
use
build
;
use
common
;
use
common
::{
BlockAndBuilder
,
FunctionContext
,
LandingPad
};
use
debuginfo
::{
DebugLoc
};
...
...
@@ -344,7 +343,7 @@ fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
let
mut
bcx
=
bcx
;
for
cleanup
in
scope
.cleanups
.iter
()
.rev
()
{
bcx
=
cleanup
.trans
(
bcx
,
scope
.debug_loc
);
bcx
=
cleanup
.trans
(
bcx
);
}
bcx
}
...
...
@@ -422,13 +421,13 @@ fn trans_cleanups_to_exit_scope(&'blk self,
UnwindKind
::
LandingPad
=>
{
let
addr
=
self
.landingpad_alloca
.get
()
.unwrap
();
let
lp
=
b
uild
::
Load
(
&
bcx
,
addr
);
let
lp
=
b
cx
.load
(
addr
);
base
::
call_lifetime_end
(
&
bcx
,
addr
);
base
::
trans_unwind_resume
(
&
bcx
,
lp
);
}
UnwindKind
::
CleanupPad
(
_
)
=>
{
let
pad
=
b
uild
::
CleanupPad
(
&
bcx
,
None
,
&
[]);
b
uild
::
CleanupRet
(
&
bcx
,
pad
,
None
);
let
pad
=
b
cx
.cleanup_pad
(
None
,
&
[]);
b
cx
.cleanup_ret
(
pad
,
None
);
}
}
prev_llbb
=
bcx
.llbb
();
...
...
@@ -488,7 +487,7 @@ fn trans_cleanups_to_exit_scope(&'blk self,
let
mut
bcx_out
=
bcx_in
;
let
len
=
scope
.cleanups
.len
();
for
cleanup
in
scope
.cleanups
.iter
()
.rev
()
.take
(
len
-
skip
)
{
bcx_out
=
cleanup
.trans
(
bcx_out
,
scope
.debug_loc
);
bcx_out
=
cleanup
.trans
(
bcx_out
);
}
skip
=
0
;
exit_label
.branch
(
&
bcx_out
,
prev_llbb
);
...
...
@@ -540,8 +539,8 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
// creation of the landingpad instruction). We then create a
// cleanuppad instruction which has no filters to run cleanup on all
// exceptions.
build
::
SetPersonalityFn
(
&
pad_bcx
,
llpersonality
);
let
llretval
=
build
::
CleanupPad
(
&
pad_bcx
,
None
,
&
[]);
pad_bcx
.set_personality_fn
(
llpersonality
);
let
llretval
=
pad_bcx
.cleanup_pad
(
None
,
&
[]);
UnwindKind
::
CleanupPad
(
llretval
)
}
else
{
// The landing pad return type (the type being propagated). Not sure
...
...
@@ -552,10 +551,10 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
false
);
// The only landing pad clause will be 'cleanup'
let
llretval
=
build
::
LandingPad
(
&
pad_bcx
,
llretty
,
llpersonality
,
1
);
let
llretval
=
pad_bcx
.landing_pad
(
llretty
,
llpersonality
,
1
,
pad_bcx
.fcx
()
.llfn
);
// The landing pad block is a cleanup
build
::
SetCleanup
(
&
pad_bcx
,
llretval
);
pad_bcx
.set_cleanup
(
llretval
);
let
addr
=
match
self
.landingpad_alloca
.get
()
{
Some
(
addr
)
=>
addr
,
...
...
@@ -567,7 +566,7 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
addr
}
};
build
::
Store
(
&
pad_bcx
,
llretval
,
addr
);
pad_bcx
.store
(
llretval
,
addr
);
UnwindKind
::
LandingPad
};
...
...
@@ -629,9 +628,9 @@ impl EarlyExitLabel {
/// the `cleanupret` instruction instead of the `br` instruction.
fn
branch
(
&
self
,
from_bcx
:
&
BlockAndBuilder
,
to_llbb
:
BasicBlockRef
)
{
if
let
UnwindExit
(
UnwindKind
::
CleanupPad
(
pad
))
=
*
self
{
build
::
CleanupRet
(
from_bcx
,
pad
,
Some
(
to_llbb
));
from_bcx
.cleanup_ret
(
pad
,
Some
(
to_llbb
));
}
else
{
build
::
Br
(
from_bcx
,
to_llbb
,
DebugLoc
::
None
);
from_bcx
.br
(
to_llbb
);
}
}
...
...
@@ -649,7 +648,7 @@ fn branch(&self, from_bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) {
fn
start
(
&
self
,
bcx
:
&
BlockAndBuilder
)
->
EarlyExitLabel
{
match
*
self
{
UnwindExit
(
UnwindKind
::
CleanupPad
(
..
))
=>
{
let
pad
=
b
uild
::
CleanupPad
(
bcx
,
None
,
&
[]);
let
pad
=
b
cx
.cleanup_pad
(
None
,
&
[]);
bcx
.set_lpad_ref
(
Some
(
bcx
.fcx
()
.lpad_arena
.alloc
(
LandingPad
::
msvc
(
pad
))));
UnwindExit
(
UnwindKind
::
CleanupPad
(
pad
))
}
...
...
@@ -683,10 +682,7 @@ pub struct DropValue<'tcx> {
}
impl
<
'tcx
>
DropValue
<
'tcx
>
{
fn
trans
<
'blk
>
(
&
self
,
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
debug_loc
:
DebugLoc
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
fn
trans
<
'blk
>
(
&
self
,
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
let
skip_dtor
=
self
.skip_dtor
;
let
_
icx
=
if
skip_dtor
{
base
::
push_ctxt
(
"<DropValue as Cleanup>::trans skip_dtor=true"
)
...
...
@@ -694,9 +690,9 @@ fn trans<'blk>(&self,
base
::
push_ctxt
(
"<DropValue as Cleanup>::trans skip_dtor=false"
)
};
if
self
.is_immediate
{
glue
::
drop_ty_immediate
(
bcx
,
self
.val
,
self
.ty
,
debug_loc
,
self
.skip_dtor
)
glue
::
drop_ty_immediate
(
bcx
,
self
.val
,
self
.ty
,
self
.skip_dtor
)
}
else
{
glue
::
drop_ty_core
(
bcx
,
self
.val
,
self
.ty
,
debug_loc
,
self
.skip_dtor
)
glue
::
drop_ty_core
(
bcx
,
self
.val
,
self
.ty
,
self
.skip_dtor
)
}
}
}
src/librustc_trans/common.rs
浏览文件 @
59ef51c1
...
...
@@ -26,12 +26,11 @@
use
rustc
::
ty
::
subst
::
Substs
;
use
abi
::{
Abi
,
FnType
};
use
base
;
use
build
;
use
builder
::
Builder
;
use
callee
::
Callee
;
use
cleanup
;
use
consts
;
use
debuginfo
::{
self
,
DebugLoc
}
;
use
debuginfo
;
use
declare
;
use
machine
;
use
monomorphize
;
...
...
@@ -434,6 +433,12 @@ pub fn eh_unwind_resume(&self) -> Callee<'tcx> {
unwresume
.set
(
Some
(
llfn
));
Callee
::
ptr
(
llfn
,
ty
)
}
pub
fn
alloca
(
&
self
,
ty
:
Type
,
name
:
&
str
)
->
ValueRef
{
let
b
=
self
.ccx
.builder
();
b
.position_before
(
self
.alloca_insert_pt
.get
()
.unwrap
());
b
.alloca
(
ty
,
name
)
}
}
// Basic block context. We create a block context for each basic block
...
...
@@ -998,35 +1003,32 @@ pub fn langcall(tcx: TyCtxt,
pub
fn
build_unchecked_lshift
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
binop_debug_loc
:
DebugLoc
)
->
ValueRef
{
rhs
:
ValueRef
)
->
ValueRef
{
let
rhs
=
base
::
cast_shift_expr_rhs
(
bcx
,
hir
::
BinOp_
::
BiShl
,
lhs
,
rhs
);
// #1877, #10183: Ensure that input is always valid
let
rhs
=
shift_mask_rhs
(
bcx
,
rhs
,
binop_debug_loc
);
b
uild
::
Shl
(
bcx
,
lhs
,
rhs
,
binop_debug_loc
)
let
rhs
=
shift_mask_rhs
(
bcx
,
rhs
);
b
cx
.shl
(
lhs
,
rhs
)
}
pub
fn
build_unchecked_rshift
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
lhs_t
:
Ty
<
'tcx
>
,
lhs
:
ValueRef
,
rhs
:
ValueRef
,
binop_debug_loc
:
DebugLoc
)
->
ValueRef
{
rhs
:
ValueRef
)
->
ValueRef
{
let
rhs
=
base
::
cast_shift_expr_rhs
(
bcx
,
hir
::
BinOp_
::
BiShr
,
lhs
,
rhs
);
// #1877, #10183: Ensure that input is always valid
let
rhs
=
shift_mask_rhs
(
bcx
,
rhs
,
binop_debug_loc
);
let
rhs
=
shift_mask_rhs
(
bcx
,
rhs
);
let
is_signed
=
lhs_t
.is_signed
();
if
is_signed
{
b
uild
::
AShr
(
bcx
,
lhs
,
rhs
,
binop_debug_loc
)
b
cx
.ashr
(
lhs
,
rhs
)
}
else
{
b
uild
::
LShr
(
bcx
,
lhs
,
rhs
,
binop_debug_loc
)
b
cx
.lshr
(
lhs
,
rhs
)
}
}
fn
shift_mask_rhs
<
'blk
,
'tcx
>
(
bcx
:
&
BlockAndBuilder
<
'blk
,
'tcx
>
,
rhs
:
ValueRef
,
debug_loc
:
DebugLoc
)
->
ValueRef
{
rhs
:
ValueRef
)
->
ValueRef
{
let
rhs_llty
=
val_ty
(
rhs
);
b
uild
::
And
(
bcx
,
rhs
,
shift_mask_val
(
bcx
,
rhs_llty
,
rhs_llty
,
false
),
debug_loc
)
b
cx
.and
(
rhs
,
shift_mask_val
(
bcx
,
rhs_llty
,
rhs_llty
,
false
)
)
}
pub
fn
shift_mask_val
<
'blk
,
'tcx
>
(
...
...
@@ -1048,7 +1050,7 @@ pub fn shift_mask_val<'blk, 'tcx>(
},
TypeKind
::
Vector
=>
{
let
mask
=
shift_mask_val
(
bcx
,
llty
.element_type
(),
mask_llty
.element_type
(),
invert
);
b
uild
::
VectorSplat
(
bcx
,
mask_llty
.vector_length
(),
mask
)
b
cx
.vector_splat
(
mask_llty
.vector_length
(),
mask
)
},
_
=>
bug!
(
"shift_mask_val: expected Integer or Vector, found {:?}"
,
kind
),
}
...
...
src/librustc_trans/glue.rs
浏览文件 @
59ef51c1
...
...
@@ -22,10 +22,9 @@
use
rustc
::
ty
::{
self
,
AdtKind
,
Ty
,
TyCtxt
,
TypeFoldable
};
use
adt
;
use
base
::
*
;
use
build
::
*
;
use
callee
::{
Callee
};
use
builder
::
Builder
;
use
common
::
*
;
use
debuginfo
::
DebugLoc
;
use
machine
::
*
;
use
monomorphize
;
use
trans_item
::
TransItem
;
...
...
@@ -41,35 +40,28 @@
pub
fn
trans_exchange_free_dyn
<
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
size
:
ValueRef
,
align
:
ValueRef
,
debug_loc
:
DebugLoc
)
align
:
ValueRef
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
let
_
icx
=
push_ctxt
(
"trans_exchange_free"
);
let
def_id
=
langcall
(
bcx
.tcx
(),
None
,
""
,
ExchangeFreeFnLangItem
);
let
args
=
[
PointerCast
(
&
bcx
,
v
,
Type
::
i8p
(
bcx
.ccx
())),
size
,
align
];
let
args
=
[
bcx
.pointercast
(
v
,
Type
::
i8p
(
bcx
.ccx
())),
size
,
align
];
Callee
::
def
(
bcx
.ccx
(),
def_id
,
bcx
.tcx
()
.intern_substs
(
&
[]))
.call
(
bcx
,
debug_loc
,
&
args
,
None
)
.0
.call
(
bcx
,
&
args
,
None
)
.0
}
pub
fn
trans_exchange_free
<
'blk
,
'tcx
>
(
cx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
size
:
u64
,
align
:
u32
,
debug_loc
:
DebugLoc
)
align
:
u32
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
let
ccx
=
cx
.ccx
();
trans_exchange_free_dyn
(
cx
,
v
,
C_uint
(
ccx
,
size
),
C_uint
(
ccx
,
align
),
debug_loc
)
trans_exchange_free_dyn
(
cx
,
v
,
C_uint
(
ccx
,
size
),
C_uint
(
ccx
,
align
))
}
pub
fn
trans_exchange_free_ty
<
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
ptr
:
ValueRef
,
content_ty
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
)
content_ty
:
Ty
<
'tcx
>
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
assert
!
(
type_is_sized
(
bcx
.ccx
()
.tcx
(),
content_ty
));
let
sizing_type
=
sizing_type_of
(
bcx
.ccx
(),
content_ty
);
...
...
@@ -78,7 +70,7 @@ pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
// `Box<ZeroSizeType>` does not allocate.
if
content_size
!=
0
{
let
content_align
=
align_of
(
bcx
.ccx
(),
content_ty
);
trans_exchange_free
(
bcx
,
ptr
,
content_size
,
content_align
,
debug_loc
)
trans_exchange_free
(
bcx
,
ptr
,
content_size
,
content_align
)
}
else
{
bcx
}
...
...
@@ -132,15 +124,13 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub
fn
drop_ty
<
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
drop_ty_core
(
bcx
,
v
,
t
,
debug_loc
,
false
)
t
:
Ty
<
'tcx
>
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
drop_ty_core
(
bcx
,
v
,
t
,
false
)
}
pub
fn
drop_ty_core
<
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
,
skip_dtor
:
bool
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
// NB: v is an *alias* of type t here, not a direct value.
...
...
@@ -156,13 +146,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
let
glue
=
get_drop_glue_core
(
ccx
,
g
);
let
glue_type
=
get_drop_glue_type
(
ccx
.tcx
(),
t
);
let
ptr
=
if
glue_type
!=
t
{
PointerCast
(
&
bcx
,
v
,
type_of
(
ccx
,
glue_type
)
.ptr_to
())
bcx
.pointercast
(
v
,
type_of
(
ccx
,
glue_type
)
.ptr_to
())
}
else
{
v
};
// No drop-hint ==> call standard drop glue
Call
(
&
bcx
,
glue
,
&
[
ptr
],
debug_loc
);
bcx
.call
(
glue
,
&
[
ptr
],
bcx
.lpad
()
.and_then
(|
b
|
b
.bundle
())
);
}
bcx
}
...
...
@@ -170,14 +160,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
pub
fn
drop_ty_immediate
<
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
v
:
ValueRef
,
t
:
Ty
<
'tcx
>
,
debug_loc
:
DebugLoc
,
skip_dtor
:
bool
)
->
BlockAndBuilder
<
'blk
,
'tcx
>
{
let
_
icx
=
push_ctxt
(
"drop_ty_immediate"
);
let
vp
=
alloc_ty
(
&
bcx
,
t
,
""
);
call_lifetime_start
(
&
bcx
,
vp
);
store_ty
(
&
bcx
,
v
,
vp
,
t
);
let
bcx
=
drop_ty_core
(
bcx
,
vp
,
t
,
debug_loc
,
skip_dtor
);
let
bcx
=
drop_ty_core
(
bcx
,
vp
,
t
,
skip_dtor
);
call_lifetime_end
(
&
bcx
,
vp
);
bcx
}
...
...
@@ -249,7 +238,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// type, so we don't need to explicitly cast the function parameter.
let
bcx
=
make_drop_glue
(
bcx
,
get_param
(
llfn
,
0
),
g
);
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
);
}
fn
trans_custom_dtor
<
'blk
,
'tcx
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
...
...
@@ -285,8 +274,8 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
}
else
{
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
unsized_args
=
[
Load
(
&
bcx
,
get_dataptr
(
&
bcx
,
v0
)),
Load
(
&
bcx
,
get_meta
(
&
bcx
,
v0
))
bcx
.load
(
get_dataptr
(
&
bcx
,
v0
)),
bcx
.load
(
get_meta
(
&
bcx
,
v0
))
];
&
unsized_args
};
...
...
@@ -301,7 +290,7 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
};
let
dtor_did
=
def
.destructor
()
.unwrap
();
bcx
=
Callee
::
def
(
bcx
.ccx
(),
dtor_did
,
vtbl
.substs
)
.call
(
bcx
,
DebugLoc
::
None
,
args
,
None
)
.0
;
.call
(
bcx
,
args
,
None
)
.0
;
bcx
.fcx
()
.pop_and_trans_custom_cleanup_scope
(
bcx
,
contents_scope
)
}
...
...
@@ -436,29 +425,27 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
assert
!
(
!
skip_dtor
);
if
!
type_is_sized
(
bcx
.tcx
(),
content_ty
)
{
let
llval
=
get_dataptr
(
&
bcx
,
v0
);
let
llbox
=
Load
(
&
bcx
,
llval
);
let
bcx
=
drop_ty
(
bcx
,
v0
,
content_ty
,
DebugLoc
::
None
);
let
llbox
=
bcx
.load
(
llval
);
let
bcx
=
drop_ty
(
bcx
,
v0
,
content_ty
);
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
let
info
=
get_meta
(
&
bcx
,
v0
);
let
info
=
Load
(
&
bcx
,
info
);
let
info
=
bcx
.load
(
info
);
let
(
llsize
,
llalign
)
=
size_and_align_of_dst
(
&
bcx
,
content_ty
,
info
);
// `Box<ZeroSizeType>` does not allocate.
let
needs_free
=
ICmp
(
&
bcx
,
let
needs_free
=
bcx
.icmp
(
llvm
::
IntNE
,
llsize
,
C_uint
(
bcx
.ccx
(),
0u64
),
DebugLoc
::
None
);
with_cond
(
bcx
,
needs_free
,
|
bcx
|
{
trans_exchange_free_dyn
(
bcx
,
llbox
,
llsize
,
llalign
,
DebugLoc
::
None
)
trans_exchange_free_dyn
(
bcx
,
llbox
,
llsize
,
llalign
)
})
}
else
{
let
llval
=
v0
;
let
llbox
=
Load
(
&
bcx
,
llval
);
let
bcx
=
drop_ty
(
bcx
,
llbox
,
content_ty
,
DebugLoc
::
None
);
trans_exchange_free_ty
(
bcx
,
llbox
,
content_ty
,
DebugLoc
::
None
)
let
llbox
=
bcx
.load
(
llval
);
let
bcx
=
drop_ty
(
bcx
,
llbox
,
content_ty
);
trans_exchange_free_ty
(
bcx
,
llbox
,
content_ty
)
}
}
ty
::
TyDynamic
(
..
)
=>
{
...
...
@@ -468,12 +455,11 @@ fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
assert
!
(
!
skip_dtor
);
let
data_ptr
=
get_dataptr
(
&
bcx
,
v0
);
let
vtable_ptr
=
Load
(
&
bcx
,
get_meta
(
&
bcx
,
v0
));
let
dtor
=
Load
(
&
bcx
,
vtable_ptr
);
Call
(
&
bcx
,
dtor
,
&
[
PointerCast
(
&
bcx
,
Load
(
&
bcx
,
data_ptr
),
Type
::
i8p
(
bcx
.ccx
()))],
DebugLoc
::
None
);
let
vtable_ptr
=
bcx
.load
(
get_meta
(
&
bcx
,
v0
));
let
dtor
=
bcx
.load
(
vtable_ptr
);
bcx
.call
(
dtor
,
&
[
bcx
.pointercast
(
bcx
.load
(
data_ptr
),
Type
::
i8p
(
bcx
.ccx
()))],
bcx
.lpad
()
.and_then
(|
b
|
b
.bundle
()));
bcx
}
ty
::
TyAdt
(
def
,
..
)
if
def
.dtor_kind
()
.is_present
()
&&
!
skip_dtor
=>
{
...
...
@@ -512,7 +498,7 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
for
(
i
,
field
)
in
variant
.fields
.iter
()
.enumerate
()
{
let
arg
=
monomorphize
::
field_ty
(
tcx
,
substs
,
field
);
let
field_ptr
=
adt
::
trans_field_ptr
(
&
cx
,
t
,
av
,
Disr
::
from
(
variant
.disr_val
),
i
);
cx
=
drop_ty
(
cx
,
field_ptr
,
arg
,
DebugLoc
::
None
);
cx
=
drop_ty
(
cx
,
field_ptr
,
arg
);
}
return
cx
;
}
...
...
@@ -521,8 +507,8 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
adt
::
MaybeSizedValue
::
sized
(
av
)
}
else
{
// FIXME(#36457) -- we should pass unsized values as two arguments
let
data
=
Load
(
&
cx
,
get_dataptr
(
&
cx
,
av
));
let
info
=
Load
(
&
cx
,
get_meta
(
&
cx
,
av
));
let
data
=
cx
.load
(
get_dataptr
(
&
cx
,
av
));
let
info
=
cx
.load
(
get_meta
(
&
cx
,
av
));
adt
::
MaybeSizedValue
::
unsized_
(
data
,
info
)
};
...
...
@@ -531,7 +517,7 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
ty
::
TyClosure
(
def_id
,
substs
)
=>
{
for
(
i
,
upvar_ty
)
in
substs
.upvar_tys
(
def_id
,
cx
.tcx
())
.enumerate
()
{
let
llupvar
=
adt
::
trans_field_ptr
(
&
cx
,
t
,
value
,
Disr
(
0
),
i
);
cx
=
drop_ty
(
cx
,
llupvar
,
upvar_ty
,
DebugLoc
::
None
);
cx
=
drop_ty
(
cx
,
llupvar
,
upvar_ty
);
}
}
ty
::
TyArray
(
_
,
n
)
=>
{
...
...
@@ -539,17 +525,17 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
let
len
=
C_uint
(
cx
.ccx
(),
n
);
let
unit_ty
=
t
.sequence_element_type
(
cx
.tcx
());
cx
=
tvec
::
slice_for_each
(
cx
,
base
,
unit_ty
,
len
,
|
bb
,
vv
|
drop_ty
(
bb
,
vv
,
unit_ty
,
DebugLoc
::
None
));
|
bb
,
vv
|
drop_ty
(
bb
,
vv
,
unit_ty
));
}
ty
::
TySlice
(
_
)
|
ty
::
TyStr
=>
{
let
unit_ty
=
t
.sequence_element_type
(
cx
.tcx
());
cx
=
tvec
::
slice_for_each
(
cx
,
value
.value
,
unit_ty
,
value
.meta
,
|
bb
,
vv
|
drop_ty
(
bb
,
vv
,
unit_ty
,
DebugLoc
::
None
));
|
bb
,
vv
|
drop_ty
(
bb
,
vv
,
unit_ty
));
}
ty
::
TyTuple
(
ref
args
)
=>
{
for
(
i
,
arg
)
in
args
.iter
()
.enumerate
()
{
let
llfld_a
=
adt
::
trans_field_ptr
(
&
cx
,
t
,
value
,
Disr
(
0
),
i
);
cx
=
drop_ty
(
cx
,
llfld_a
,
*
arg
,
DebugLoc
::
None
);
cx
=
drop_ty
(
cx
,
llfld_a
,
*
arg
);
}
}
ty
::
TyAdt
(
adt
,
substs
)
=>
match
adt
.adt_kind
()
{
...
...
@@ -563,11 +549,11 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
}
else
{
// FIXME(#36457) -- we should pass unsized values as two arguments
let
scratch
=
alloc_ty
(
&
cx
,
field_ty
,
"__fat_ptr_iter"
);
Store
(
&
cx
,
llfld_a
,
get_dataptr
(
&
cx
,
scratch
));
Store
(
&
cx
,
value
.meta
,
get_meta
(
&
cx
,
scratch
));
cx
.store
(
llfld_a
,
get_dataptr
(
&
cx
,
scratch
));
cx
.store
(
value
.meta
,
get_meta
(
&
cx
,
scratch
));
scratch
};
cx
=
drop_ty
(
cx
,
val
,
field_ty
,
DebugLoc
::
None
);
cx
=
drop_ty
(
cx
,
val
,
field_ty
);
}
}
AdtKind
::
Union
=>
{
...
...
@@ -591,7 +577,7 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
}
(
adt
::
BranchKind
::
Switch
,
Some
(
lldiscrim_a
))
=>
{
let
tcx
=
cx
.tcx
();
cx
=
drop_ty
(
cx
,
lldiscrim_a
,
tcx
.types.isize
,
DebugLoc
::
None
);
cx
=
drop_ty
(
cx
,
lldiscrim_a
,
tcx
.types.isize
);
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
...
...
@@ -607,8 +593,8 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
// call this for an already-valid enum in which case the `ret
// void` will never be hit.
let
ret_void_cx
=
fcx
.new_block
(
"enum-iter-ret-void"
)
.build
();
RetVoid
(
&
ret_void_cx
,
DebugLoc
::
None
);
let
llswitch
=
Switch
(
&
cx
,
lldiscrim_a
,
ret_void_cx
.llbb
(),
n_variants
);
ret_void_cx
.ret_void
(
);
let
llswitch
=
cx
.switch
(
lldiscrim_a
,
ret_void_cx
.llbb
(),
n_variants
);
let
next_cx
=
fcx
.new_block
(
"enum-iter-next"
)
.build
();
for
variant
in
&
adt
.variants
{
...
...
@@ -616,9 +602,9 @@ fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
&
variant
.disr_val
.to_string
());
let
variant_cx
=
fcx
.new_block
(
&
variant_cx_name
)
.build
();
let
case_val
=
adt
::
trans_case
(
&
cx
,
t
,
Disr
::
from
(
variant
.disr_val
));
AddC
ase
(
llswitch
,
case_val
,
variant_cx
.llbb
());
Builder
::
add_c
ase
(
llswitch
,
case_val
,
variant_cx
.llbb
());
let
variant_cx
=
iter_variant
(
variant_cx
,
t
,
value
,
variant
,
substs
);
Br
(
&
variant_cx
,
next_cx
.llbb
(),
DebugLoc
::
None
);
variant_cx
.br
(
next_cx
.llbb
()
);
}
cx
=
next_cx
;
}
...
...
src/librustc_trans/intrinsic.rs
浏览文件 @
59ef51c1
此差异已折叠。
点击以展开。
src/librustc_trans/lib.rs
浏览文件 @
59ef51c1
...
...
@@ -96,7 +96,6 @@ pub mod back {
mod
attributes
;
mod
base
;
mod
basic_block
;
mod
build
;
mod
builder
;
mod
cabi_aarch64
;
mod
cabi_arm
;
...
...
src/librustc_trans/meth.rs
浏览文件 @
59ef51c1
...
...
@@ -14,11 +14,9 @@
use
rustc
::
traits
;
use
abi
::
FnType
;
use
base
::
*
;
use
build
::
*
;
use
callee
::
Callee
;
use
common
::
*
;
use
consts
;
use
debuginfo
::
DebugLoc
;
use
declare
;
use
glue
;
use
machine
;
...
...
@@ -40,7 +38,7 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
debug!
(
"get_virtual_method(vtable_index={}, llvtable={:?})"
,
vtable_index
,
Value
(
llvtable
));
Load
(
bcx
,
GEPi
(
bcx
,
llvtable
,
&
[
vtable_index
+
VTABLE_OFFSET
]))
bcx
.load
(
bcx
.gepi
(
llvtable
,
&
[
vtable_index
+
VTABLE_OFFSET
]))
}
/// Generate a shim function that allows an object type like `SomeTrait` to
...
...
@@ -93,10 +91,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
let
dest
=
fcx
.llretslotptr
.get
();
let
llargs
=
get_params
(
fcx
.llfn
);
bcx
=
callee
.call
(
bcx
,
DebugLoc
::
None
,
&
llargs
[
fcx
.fn_ty.ret
.is_indirect
()
as
usize
..
],
dest
)
.0
;
bcx
=
callee
.call
(
bcx
,
&
llargs
[
fcx
.fn_ty.ret
.is_indirect
()
as
usize
..
],
dest
)
.0
;
fcx
.finish
(
&
bcx
,
DebugLoc
::
None
);
fcx
.finish
(
&
bcx
);
llfn
}
...
...
src/librustc_trans/mir/block.rs
浏览文件 @
59ef51c1
...
...
@@ -16,10 +16,10 @@
use
abi
::{
Abi
,
FnType
,
ArgType
};
use
adt
;
use
base
;
use
build
;
use
callee
::{
Callee
,
CalleeData
,
Fn
,
Intrinsic
,
NamedTupleConstructor
,
Virtual
};
use
common
::{
self
,
Block
,
BlockAndBuilder
,
LandingPad
};
use
common
::{
C_bool
,
C_str_slice
,
C_struct
,
C_u32
,
C_undef
};
use
builder
::
Builder
;
use
consts
;
use
debuginfo
::
DebugLoc
;
use
Disr
;
...
...
@@ -167,7 +167,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
if
default_bb
!=
Some
(
target
)
{
let
llbb
=
llblock
(
self
,
target
);
let
llval
=
adt
::
trans_case
(
&
bcx
,
ty
,
Disr
::
from
(
adt_variant
.disr_val
));
build
::
AddC
ase
(
switch
,
llval
,
llbb
)
Builder
::
add_c
ase
(
switch
,
llval
,
llbb
)
}
}
}
...
...
@@ -180,7 +180,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
for
(
value
,
target
)
in
values
.iter
()
.zip
(
targets
)
{
let
val
=
Const
::
from_constval
(
bcx
.ccx
(),
value
.clone
(),
switch_ty
);
let
llbb
=
llblock
(
self
,
*
target
);
build
::
AddC
ase
(
switch
,
val
.llval
,
llbb
)
Builder
::
add_c
ase
(
switch
,
val
.llval
,
llbb
)
}
}
...
...
@@ -204,7 +204,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
};
let
llslot
=
match
op
.val
{
Immediate
(
_
)
|
Pair
(
..
)
=>
{
let
llscratch
=
b
uild
::
AllocaFcx
(
bcx
.fcx
(),
ret
.original_ty
,
"ret"
);
let
llscratch
=
b
cx
.fcx
()
.alloca
(
ret
.original_ty
,
"ret"
);
self
.store_operand
(
&
bcx
,
llscratch
,
op
);
llscratch
}
...
...
@@ -257,8 +257,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
// I want to avoid touching all of trans.
let
scratch
=
base
::
alloc_ty
(
&
bcx
,
ty
,
"drop"
);
base
::
call_lifetime_start
(
&
bcx
,
scratch
);
b
uild
::
Store
(
&
bcx
,
lvalue
.llval
,
base
::
get_dataptr
(
&
bcx
,
scratch
));
b
uild
::
Store
(
&
bcx
,
lvalue
.llextra
,
base
::
get_meta
(
&
bcx
,
scratch
));
b
cx
.store
(
lvalue
.llval
,
base
::
get_dataptr
(
&
bcx
,
scratch
));
b
cx
.store
(
lvalue
.llextra
,
base
::
get_meta
(
&
bcx
,
scratch
));
scratch
};
if
let
Some
(
unwind
)
=
unwind
{
...
...
@@ -479,8 +479,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
// I want to avoid touching all of trans.
let
scratch
=
base
::
alloc_ty
(
&
bcx
,
ty
,
"drop"
);
base
::
call_lifetime_start
(
&
bcx
,
scratch
);
b
uild
::
Store
(
&
bcx
,
llval
,
base
::
get_dataptr
(
&
bcx
,
scratch
));
b
uild
::
Store
(
&
bcx
,
llextra
,
base
::
get_meta
(
&
bcx
,
scratch
));
b
cx
.store
(
llval
,
base
::
get_dataptr
(
&
bcx
,
scratch
));
b
cx
.store
(
llextra
,
base
::
get_meta
(
&
bcx
,
scratch
));
scratch
};
if
let
Some
(
unwind
)
=
*
cleanup
{
...
...
@@ -702,7 +702,7 @@ fn trans_argument(&mut self,
let
(
mut
llval
,
by_ref
)
=
match
op
.val
{
Immediate
(
_
)
|
Pair
(
..
)
=>
{
if
arg
.is_indirect
()
||
arg
.cast
.is_some
()
{
let
llscratch
=
b
uild
::
AllocaFcx
(
bcx
.fcx
(),
arg
.original_ty
,
"arg"
);
let
llscratch
=
b
cx
.fcx
()
.alloca
(
arg
.original_ty
,
"arg"
);
self
.store_operand
(
bcx
,
llscratch
,
op
);
(
llscratch
,
true
)
}
else
{
...
...
src/librustc_trans/mir/mod.rs
浏览文件 @
59ef51c1
...
...
@@ -478,10 +478,9 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
// environment into its components so it ends up out of bounds.
let
env_ptr
=
if
!
env_ref
{
use
base
::
*
;
use
build
::
*
;
use
common
::
*
;
let
alloc
=
alloca
(
bcx
,
val_ty
(
llval
),
"__debuginfo_env_ptr"
);
Store
(
bcx
,
llval
,
alloc
);
bcx
.store
(
llval
,
alloc
);
alloc
}
else
{
llval
...
...
src/librustc_trans/mir/operand.rs
浏览文件 @
59ef51c1
...
...
@@ -264,11 +264,10 @@ pub fn store_operand_direct(&mut self,
OperandValue
::
Ref
(
r
)
=>
base
::
memcpy_ty
(
bcx
,
lldest
,
r
,
operand
.ty
),
OperandValue
::
Immediate
(
s
)
=>
base
::
store_ty
(
bcx
,
s
,
lldest
,
operand
.ty
),
OperandValue
::
Pair
(
a
,
b
)
=>
{
use
build
::
*
;
let
a
=
base
::
from_immediate
(
bcx
,
a
);
let
b
=
base
::
from_immediate
(
bcx
,
b
);
Store
(
bcx
,
a
,
StructGEP
(
bcx
,
lldest
,
0
));
Store
(
bcx
,
b
,
StructGEP
(
bcx
,
lldest
,
1
));
bcx
.store
(
a
,
bcx
.struct_gep
(
lldest
,
0
));
bcx
.store
(
b
,
bcx
.struct_gep
(
lldest
,
1
));
}
}
}
...
...
src/librustc_trans/mir/rvalue.rs
浏览文件 @
59ef51c1
...
...
@@ -19,7 +19,6 @@
use
callee
::
Callee
;
use
common
::{
self
,
val_ty
,
C_bool
,
C_null
,
C_uint
,
BlockAndBuilder
};
use
common
::{
C_integral
};
use
debuginfo
::
DebugLoc
;
use
adt
;
use
machine
;
use
type_
::
Type
;
...
...
@@ -37,8 +36,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub
fn
trans_rvalue
(
&
mut
self
,
bcx
:
BlockAndBuilder
<
'bcx
,
'tcx
>
,
dest
:
LvalueRef
<
'tcx
>
,
rvalue
:
&
mir
::
Rvalue
<
'tcx
>
,
debug_loc
:
DebugLoc
)
rvalue
:
&
mir
::
Rvalue
<
'tcx
>
)
->
BlockAndBuilder
<
'bcx
,
'tcx
>
{
debug!
(
"trans_rvalue(dest.llval={:?}, rvalue={:?})"
,
...
...
@@ -59,7 +57,7 @@ pub fn trans_rvalue(&mut self,
if
common
::
type_is_fat_ptr
(
bcx
.tcx
(),
cast_ty
)
{
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let
(
bcx
,
temp
)
=
self
.trans_rvalue_operand
(
bcx
,
rvalue
,
debug_loc
);
let
(
bcx
,
temp
)
=
self
.trans_rvalue_operand
(
bcx
,
rvalue
);
self
.store_operand
(
&
bcx
,
dest
.llval
,
temp
);
return
bcx
;
}
...
...
@@ -171,7 +169,7 @@ pub fn trans_rvalue(&mut self,
_
=>
{
assert
!
(
rvalue_creates_operand
(
&
self
.mir
,
&
bcx
,
rvalue
));
let
(
bcx
,
temp
)
=
self
.trans_rvalue_operand
(
bcx
,
rvalue
,
debug_loc
);
let
(
bcx
,
temp
)
=
self
.trans_rvalue_operand
(
bcx
,
rvalue
);
self
.store_operand
(
&
bcx
,
dest
.llval
,
temp
);
bcx
}
...
...
@@ -180,8 +178,7 @@ pub fn trans_rvalue(&mut self,
pub
fn
trans_rvalue_operand
(
&
mut
self
,
bcx
:
BlockAndBuilder
<
'bcx
,
'tcx
>
,
rvalue
:
&
mir
::
Rvalue
<
'tcx
>
,
debug_loc
:
DebugLoc
)
rvalue
:
&
mir
::
Rvalue
<
'tcx
>
)
->
(
BlockAndBuilder
<
'bcx
,
'tcx
>
,
OperandRef
<
'tcx
>
)
{
assert
!
(
rvalue_creates_operand
(
&
self
.mir
,
&
bcx
,
rvalue
),
...
...
@@ -455,14 +452,7 @@ pub fn trans_rvalue_operand(&mut self,
let
llalign
=
C_uint
(
bcx
.ccx
(),
align
);
let
llty_ptr
=
llty
.ptr_to
();
let
box_ty
=
bcx
.tcx
()
.mk_box
(
content_ty
);
let
val
=
base
::
malloc_raw_dyn
(
&
bcx
,
llty_ptr
,
box_ty
,
llsize
,
llalign
,
debug_loc
);
let
val
=
base
::
malloc_raw_dyn
(
&
bcx
,
llty_ptr
,
box_ty
,
llsize
,
llalign
);
let
operand
=
OperandRef
{
val
:
OperandValue
::
Immediate
(
val
),
ty
:
box_ty
,
...
...
@@ -526,23 +516,8 @@ pub fn trans_scalar_binop(&mut self,
mir
::
BinOp
::
BitOr
=>
bcx
.or
(
lhs
,
rhs
),
mir
::
BinOp
::
BitAnd
=>
bcx
.and
(
lhs
,
rhs
),
mir
::
BinOp
::
BitXor
=>
bcx
.xor
(
lhs
,
rhs
),
mir
::
BinOp
::
Shl
=>
{
common
::
build_unchecked_lshift
(
&
bcx
,
lhs
,
rhs
,
DebugLoc
::
None
)
}
mir
::
BinOp
::
Shr
=>
{
common
::
build_unchecked_rshift
(
bcx
,
input_ty
,
lhs
,
rhs
,
DebugLoc
::
None
)
}
mir
::
BinOp
::
Shl
=>
common
::
build_unchecked_lshift
(
bcx
,
lhs
,
rhs
),
mir
::
BinOp
::
Shr
=>
common
::
build_unchecked_rshift
(
bcx
,
input_ty
,
lhs
,
rhs
),
mir
::
BinOp
::
Ne
|
mir
::
BinOp
::
Lt
|
mir
::
BinOp
::
Gt
|
mir
::
BinOp
::
Eq
|
mir
::
BinOp
::
Le
|
mir
::
BinOp
::
Ge
=>
if
is_nil
{
C_bool
(
bcx
.ccx
(),
match
op
{
...
...
src/librustc_trans/mir/statement.rs
浏览文件 @
59ef51c1
...
...
@@ -33,11 +33,10 @@ pub fn trans_statement(&mut self,
if
let
mir
::
Lvalue
::
Local
(
index
)
=
*
lvalue
{
match
self
.locals
[
index
]
{
LocalRef
::
Lvalue
(
tr_dest
)
=>
{
self
.trans_rvalue
(
bcx
,
tr_dest
,
rvalue
,
debug_loc
)
self
.trans_rvalue
(
bcx
,
tr_dest
,
rvalue
)
}
LocalRef
::
Operand
(
None
)
=>
{
let
(
bcx
,
operand
)
=
self
.trans_rvalue_operand
(
bcx
,
rvalue
,
debug_loc
);
let
(
bcx
,
operand
)
=
self
.trans_rvalue_operand
(
bcx
,
rvalue
);
self
.locals
[
index
]
=
LocalRef
::
Operand
(
Some
(
operand
));
bcx
}
...
...
@@ -51,13 +50,13 @@ pub fn trans_statement(&mut self,
}
else
{
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we translate the operand
self
.trans_rvalue_operand
(
bcx
,
rvalue
,
debug_loc
)
.0
self
.trans_rvalue_operand
(
bcx
,
rvalue
)
.0
}
}
}
}
else
{
let
tr_dest
=
self
.trans_lvalue
(
&
bcx
,
lvalue
);
self
.trans_rvalue
(
bcx
,
tr_dest
,
rvalue
,
debug_loc
)
self
.trans_rvalue
(
bcx
,
tr_dest
,
rvalue
)
}
}
mir
::
StatementKind
::
SetDiscriminant
{
ref
lvalue
,
variant_index
}
=>
{
...
...
src/librustc_trans/tvec.rs
浏览文件 @
59ef51c1
...
...
@@ -13,9 +13,8 @@
use
llvm
;
use
llvm
::
ValueRef
;
use
base
::
*
;
use
build
::
*
;
use
common
::
*
;
use
debuginfo
::
DebugLoc
;
use
builder
::
Builder
;
use
rustc
::
ty
::
Ty
;
pub
fn
slice_for_each
<
'blk
,
'tcx
,
F
>
(
bcx
:
BlockAndBuilder
<
'blk
,
'tcx
>
,
...
...
@@ -31,10 +30,10 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>,
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let
zst
=
type_is_zero_size
(
bcx
.ccx
(),
unit_ty
);
let
add
=
|
bcx
,
a
,
b
|
if
zst
{
Add
(
bcx
,
a
,
b
,
DebugLoc
::
None
)
let
add
=
|
bcx
:
&
BlockAndBuilder
,
a
,
b
|
if
zst
{
bcx
.add
(
a
,
b
)
}
else
{
InBoundsGEP
(
bcx
,
a
,
&
[
b
])
bcx
.inbounds_gep
(
a
,
&
[
b
])
};
let
body_bcx
=
fcx
.new_block
(
"slice_loop_body"
)
.build
();
...
...
@@ -42,28 +41,27 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>,
let
header_bcx
=
fcx
.new_block
(
"slice_loop_header"
)
.build
();
let
start
=
if
zst
{
C_uint
(
bcx
.ccx
(),
0
as
usize
)
C_uint
(
bcx
.ccx
(),
0u
size
)
}
else
{
data_ptr
};
let
end
=
add
(
&
bcx
,
start
,
len
);
Br
(
&
bcx
,
header_bcx
.llbb
(),
DebugLoc
::
None
);
let
current
=
Phi
(
&
header_bcx
,
val_ty
(
start
),
&
[
start
],
&
[
bcx
.llbb
()]);
bcx
.br
(
header_bcx
.llbb
()
);
let
current
=
header_bcx
.phi
(
val_ty
(
start
),
&
[
start
],
&
[
bcx
.llbb
()]);
let
keep_going
=
ICmp
(
&
header_bcx
,
llvm
::
IntNE
,
current
,
end
,
DebugLoc
::
None
);
CondBr
(
&
header_bcx
,
keep_going
,
body_bcx
.llbb
(),
next_bcx
.llbb
(),
DebugLoc
::
None
);
let
keep_going
=
header_bcx
.icmp
(
llvm
::
IntNE
,
current
,
end
);
header_bcx
.cond_br
(
keep_going
,
body_bcx
.llbb
(),
next_bcx
.llbb
());
let
body_bcx
=
f
(
body_bcx
,
if
zst
{
data_ptr
}
else
{
current
});
// FIXME(simulacrum): The code below is identical to the closure (add) above, but using the
// closure doesn't compile due to body_bcx still being borrowed when dropped.
let
next
=
if
zst
{
Add
(
&
body_bcx
,
current
,
C_uint
(
bcx
.ccx
(),
1u
size
),
DebugLoc
::
None
)
body_bcx
.add
(
current
,
C_uint
(
bcx
.ccx
(),
1u
size
)
)
}
else
{
InBoundsGEP
(
&
body_bcx
,
current
,
&
[
C_uint
(
bcx
.ccx
(),
1u
size
)])
body_bcx
.inbounds_gep
(
current
,
&
[
C_uint
(
bcx
.ccx
(),
1u
size
)])
};
AddIncomingToP
hi
(
current
,
next
,
body_bcx
.llbb
());
Br
(
&
body_bcx
,
header_bcx
.llbb
(),
DebugLoc
::
None
);
Builder
::
add_incoming_to_p
hi
(
current
,
next
,
body_bcx
.llbb
());
body_bcx
.br
(
header_bcx
.llbb
()
);
next_bcx
}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录