Commit bf7d4534, authored by Mark-Simulacrum, committed by Mark Simulacrum

Refactor Block into BlockAndBuilder

Parent: 164619a8
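The change replaces the Copy-able `Block` context, which helpers took by value and from which they re-created an LLVM builder on demand (`B(bcx)` / `bcx.build()`), with a `BlockAndBuilder` that bundles the block with one owned builder. Helpers now borrow it (`&BlockAndBuilder`), and APIs that move control flow forward take and return it by value, yielding a `(BlockAndBuilder, ValueRef)` tuple instead of the old `common::Result` struct. A minimal toy sketch of that shape, with stand-in types rather than the real rustc ones:

    // Toy model only: Block/Builder/ValueRef here are placeholders.
    #[derive(Clone, Copy)]
    struct Block {
        terminated: bool, // rustc wraps these flags in Cell<bool>
    }

    struct Builder; // stands in for the LLVM IR builder

    struct BlockAndBuilder {
        bcx: Block,
        owned_builder: Builder, // one builder, owned for the block's lifetime
    }

    type ValueRef = u64; // placeholder for llvm::ValueRef

    impl BlockAndBuilder {
        fn new(bcx: Block) -> Self {
            BlockAndBuilder { bcx, owned_builder: Builder }
        }
        fn is_terminated(&self) -> bool {
            self.bcx.terminated
        }
    }

    // Old style: fn helper(bcx: Block), rebuilding a Builder inside.
    // New style: borrow the bundled builder instead.
    fn helper(bcx: &BlockAndBuilder) -> ValueRef {
        bcx.is_terminated() as ValueRef
    }

    // Call-like APIs hand the block back in a tuple, not Result { bcx, val }.
    fn call_like(bcx: BlockAndBuilder) -> (BlockAndBuilder, ValueRef) {
        let val = helper(&bcx);
        (bcx, val)
    }

    fn main() {
        let bcx = BlockAndBuilder::new(Block { terminated: false });
        let (_bcx, val) = call_like(bcx);
        assert_eq!(val, 0);
    }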
@@ -304,7 +304,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
 /// Obtain a representation of the discriminant sufficient to translate
 /// destructuring; this may or may not involve the actual discriminant.
-pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn trans_switch<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                 t: Ty<'tcx>,
                                 scrutinee: ValueRef,
                                 range_assert: bool)
@@ -331,7 +331,7 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
 }
 
 /// Obtain the actual discriminant of a value.
-pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
+pub fn trans_get_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>,
                                    scrutinee: ValueRef, cast_to: Option<Type>,
                                    range_assert: bool)
                                    -> ValueRef {
@@ -371,8 +371,12 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
     }
 }
 
-fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath,
-                                    scrutinee: ValueRef) -> ValueRef {
+fn struct_wrapped_nullable_bitdiscr(
+    bcx: &BlockAndBuilder,
+    nndiscr: u64,
+    discrfield: &layout::FieldPath,
+    scrutinee: ValueRef
+) -> ValueRef {
     let llptrptr = GEPi(bcx, scrutinee,
         &discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>()[..]);
     let llptr = Load(bcx, llptrptr);
@@ -381,7 +385,7 @@ fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layou
 }
 
 /// Helper for cases where the discriminant is simply loaded.
-fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
+fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
               range_assert: bool)
               -> ValueRef {
     let llty = Type::from_integer(bcx.ccx(), ity);
@@ -409,7 +413,7 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6
 /// discriminant-like value returned by `trans_switch`.
 ///
 /// This should ideally be less tightly tied to `_match`.
-pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
+pub fn trans_case<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
                               -> ValueRef {
     let l = bcx.ccx().layout_of(t);
     match *l {
@@ -430,7 +434,7 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
 
 /// Set the discriminant for a new value of the given case of the given
 /// representation.
-pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
+pub fn trans_set_discr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>,
                                    val: ValueRef, to: Disr) {
     let l = bcx.ccx().layout_of(t);
     match *l {
@@ -461,12 +465,11 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
                 // Issue #34427: As workaround for LLVM bug on
                 // ARM, use memset of 0 on whole struct rather
                 // than storing null to single target field.
-                let b = B(bcx);
-                let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to());
-                let fill_byte = C_u8(b.ccx, 0);
-                let size = C_uint(b.ccx, nonnull.stride().bytes());
-                let align = C_i32(b.ccx, nonnull.align.abi() as i32);
-                base::call_memset(&b, llptr, fill_byte, size, align, false);
+                let llptr = bcx.pointercast(val, Type::i8(bcx.ccx()).ptr_to());
+                let fill_byte = C_u8(bcx.ccx(), 0);
+                let size = C_uint(bcx.ccx(), nonnull.stride().bytes());
+                let align = C_i32(bcx.ccx(), nonnull.align.abi() as i32);
+                base::call_memset(bcx, llptr, fill_byte, size, align, false);
             } else {
                 let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
                 let llptrptr = GEPi(bcx, val, &path[..]);
@@ -479,7 +482,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
     }
 }
 
-fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool {
+fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>) -> bool {
     bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
 }
 
@@ -492,9 +495,9 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
 }
 
 /// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
+pub fn trans_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, t: Ty<'tcx>,
                                    val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
-    trans_field_ptr_builder(&bcx.build(), t, val, discr, ix)
+    trans_field_ptr_builder(bcx, t, val, discr, ix)
 }
 
 /// Access a field, at a point when the value's case is known.
@@ -530,7 +533,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
         layout::UntaggedUnion { .. } => {
             let fields = compute_fields(bcx.ccx(), t, 0, false);
             let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]);
-            if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
             bcx.pointercast(val.value, ty.ptr_to())
         }
         layout::RawNullablePointer { nndiscr, .. } |
@@ -540,9 +542,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
             // (e.d., Result of Either with (), as one side.)
             let ty = type_of::type_of(bcx.ccx(), nullfields[ix]);
             assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
-            // The contents of memory at this pointer can't matter, but use
-            // the value that's "reasonable" in case of pointer comparison.
-            if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
             bcx.pointercast(val.value, ty.ptr_to())
         }
         layout::RawNullablePointer { nndiscr, .. } => {
@@ -550,7 +549,6 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
             assert_eq!(ix, 0);
             assert_eq!(discr.0, nndiscr);
             let ty = type_of::type_of(bcx.ccx(), nnty);
-            if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
             bcx.pointercast(val.value, ty.ptr_to())
         }
         layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
@@ -569,9 +567,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
     let fty = fields[ix];
     let ccx = bcx.ccx();
     let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
-    if bcx.is_unreachable() {
-        return C_undef(ll_fty.ptr_to());
-    }
 
     let ptr_val = if needs_cast {
         let fields = st.field_index_by_increasing_offset().map(|i| {
...
@@ -25,7 +25,7 @@
 use libc::{c_uint, c_char};
 
 // Take an inline assembly expression and splat it out via LLVM
-pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn trans_inline_asm<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                     ia: &hir::InlineAsm,
                                     outputs: Vec<(ValueRef, Ty<'tcx>)>,
                                     mut inputs: Vec<ValueRef>) {
...
@@ -54,11 +54,10 @@
 use build::*;
 use builder::{Builder, noname};
 use callee::{Callee};
-use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint};
+use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint};
 use collector::{self, TransItemCollectionMode};
 use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
 use common::{CrateContext, FunctionContext};
-use common::{Result};
 use common::{fulfill_obligation};
 use common::{type_is_zero_size, val_ty};
 use common;
@@ -174,11 +173,11 @@ fn drop(&mut self) {
     }
 }
 
-pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+pub fn get_meta(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef {
     StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
 }
 
-pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
+pub fn get_dataptr(bcx: &BlockAndBuilder, fat_ptr: ValueRef) -> ValueRef {
     StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
 }
 
@@ -190,7 +189,9 @@ pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
     b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
 }
 
-fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
+fn require_alloc_fn<'blk, 'tcx>(
+    bcx: &BlockAndBuilder<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem
+) -> DefId {
     match bcx.tcx().lang_items.require(it) {
         Ok(id) => id,
         Err(s) => {
@@ -202,21 +203,19 @@ fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: L
 // The following malloc_raw_dyn* functions allocate a box to contain
 // a given type, but with a potentially dynamic size.
 
-pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                   llty_ptr: Type,
                                   info_ty: Ty<'tcx>,
                                   size: ValueRef,
                                   align: ValueRef,
                                   debug_loc: DebugLoc)
-                                  -> Result<'blk, 'tcx> {
+                                  -> ValueRef {
     let _icx = push_ctxt("malloc_raw_exchange");
 
     // Allocate space:
     let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
-    let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
-        .call(bcx, debug_loc, &[size, align], None);
-
-    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
+    let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])).reify(bcx.ccx());
+    PointerCast(bcx, Call(bcx, r, &[size, align], debug_loc), llty_ptr)
 }
@@ -254,7 +253,7 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
     }
 }
 
-pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn compare_simd_types<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                       lhs: ValueRef,
                                       rhs: ValueRef,
                                       t: Ty<'tcx>,
@@ -311,7 +310,7 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
 }
 
 /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
-pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                    src: ValueRef,
                                    src_ty: Ty<'tcx>,
                                    dst_ty: Ty<'tcx>)
@@ -336,7 +335,7 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
-pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn coerce_unsized_into<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                        src: ValueRef,
                                        src_ty: Ty<'tcx>,
                                        dst: ValueRef,
@@ -415,7 +414,7 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx
     }
 }
 
-pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
+pub fn cast_shift_expr_rhs(cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
     cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
 }
 
@@ -462,38 +461,38 @@ fn cast_shift_rhs<F, G>(op: hir::BinOp_,
     }
 }
 
-pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn invoke<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                           llfn: ValueRef,
                           llargs: &[ValueRef],
                           debug_loc: DebugLoc)
-                          -> (ValueRef, Block<'blk, 'tcx>) {
+                          -> (ValueRef, BlockAndBuilder<'blk, 'tcx>) {
     let _icx = push_ctxt("invoke_");
-    if bcx.unreachable.get() {
+    if bcx.is_unreachable() {
         return (C_null(Type::i8(bcx.ccx())), bcx);
     }
 
-    if need_invoke(bcx) {
-        debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb);
+    if need_invoke(&bcx) {
+        debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb());
         for &llarg in llargs {
             debug!("arg: {:?}", Value(llarg));
         }
-        let normal_bcx = bcx.fcx.new_block("normal-return");
-        let landing_pad = bcx.fcx.get_landing_pad();
+        let normal_bcx = bcx.fcx().new_block("normal-return");
+        let landing_pad = bcx.fcx().get_landing_pad();
 
-        let llresult = Invoke(bcx,
+        let llresult = Invoke(&bcx,
                               llfn,
                               &llargs[..],
                               normal_bcx.llbb,
                               landing_pad,
                               debug_loc);
-        return (llresult, normal_bcx);
+        return (llresult, normal_bcx.build());
     } else {
-        debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb);
+        debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb());
         for &llarg in llargs {
             debug!("arg: {:?}", Value(llarg));
         }
-        let llresult = Call(bcx, llfn, &llargs[..], debug_loc);
+        let llresult = Call(&bcx, llfn, &llargs[..], debug_loc);
         return (llresult, bcx);
     }
 }
@@ -507,15 +506,11 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
     sess.target.target.options.is_like_msvc
 }
 
-pub fn avoid_invoke(bcx: Block) -> bool {
-    bcx.sess().no_landing_pads() || bcx.lpad().is_some()
-}
-
-pub fn need_invoke(bcx: Block) -> bool {
-    if avoid_invoke(bcx) {
+fn need_invoke(bcx: &BlockAndBuilder) -> bool {
+    if bcx.sess().no_landing_pads() || bcx.lpad().is_some() {
         false
     } else {
-        bcx.fcx.needs_invoke()
+        bcx.fcx().needs_invoke()
     }
 }
@@ -527,11 +522,8 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
 /// Helper for loading values from memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values. Also handles various special cases where the type
 /// gives us better information about what we are loading.
-pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
-    if cx.unreachable.get() {
-        return C_undef(type_of::type_of(cx.ccx(), t));
-    }
-    load_ty_builder(&B(cx), ptr, t)
+pub fn load_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
+    load_ty_builder(cx, ptr, t)
 }
 
 pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
@@ -569,8 +561,8 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc
 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values.
-pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
-    if cx.unreachable.get() {
+pub fn store_ty<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
+    if cx.is_unreachable() {
         return;
     }
@@ -585,7 +577,7 @@ pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t
     }
 }
 
-pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+pub fn store_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
                                  data: ValueRef,
                                  extra: ValueRef,
                                  dst: ValueRef,
@@ -595,18 +587,18 @@ pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
     Store(cx, extra, get_meta(cx, dst));
 }
 
-pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
+pub fn load_fat_ptr<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>,
                                 src: ValueRef,
                                 ty: Ty<'tcx>)
                                 -> (ValueRef, ValueRef)
 {
-    if cx.unreachable.get() {
+    if cx.is_unreachable() {
         // FIXME: remove me
         return (Load(cx, get_dataptr(cx, src)),
                 Load(cx, get_meta(cx, src)));
     }
 
-    load_fat_ptr_builder(&B(cx), src, ty)
+    load_fat_ptr_builder(cx, src, ty)
 }
 
 pub fn load_fat_ptr_builder<'a, 'tcx>(
@@ -629,7 +621,7 @@ pub fn load_fat_ptr_builder<'a, 'tcx>(
     (ptr, meta)
 }
 
-pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
+pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef {
     if val_ty(val) == Type::i1(bcx.ccx()) {
         ZExt(bcx, val, Type::i8(bcx.ccx()))
     } else {
@@ -637,7 +629,7 @@ pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
     }
 }
 
-pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
+pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef {
     if ty.is_bool() {
         Trunc(bcx, val, Type::i1(bcx.ccx()))
     } else {
@@ -645,23 +637,23 @@ pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
     }
 }
 
-pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
-    where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
+pub fn with_cond<'blk, 'tcx, F>(
+    bcx: BlockAndBuilder<'blk, 'tcx>, val: ValueRef, f: F
+) -> BlockAndBuilder<'blk, 'tcx>
+    where F: FnOnce(BlockAndBuilder<'blk, 'tcx>) -> BlockAndBuilder<'blk, 'tcx>
 {
     let _icx = push_ctxt("with_cond");
 
-    if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) {
+    if bcx.is_unreachable() || common::const_to_opt_uint(val) == Some(0) {
         return bcx;
     }
 
-    let fcx = bcx.fcx;
-    let next_cx = fcx.new_block("next");
-    let cond_cx = fcx.new_block("cond");
-    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
+    let fcx = bcx.fcx();
+    let next_cx = fcx.new_block("next").build();
+    let cond_cx = fcx.new_block("cond").build();
+    CondBr(&bcx, val, cond_cx.llbb(), next_cx.llbb(), DebugLoc::None);
     let after_cx = f(cond_cx);
-    if !after_cx.terminated.get() {
-        Br(after_cx, next_cx.llbb, DebugLoc::None);
-    }
+    Br(&after_cx, next_cx.llbb(), DebugLoc::None);
     next_cx
 }
@@ -711,26 +703,25 @@ pub fn call(self, b: &Builder, ptr: ValueRef) {
     }
 }
 
-pub fn call_lifetime_start(bcx: Block, ptr: ValueRef) {
-    if !bcx.unreachable.get() {
-        Lifetime::Start.call(&bcx.build(), ptr);
+pub fn call_lifetime_start(bcx: &BlockAndBuilder, ptr: ValueRef) {
+    if !bcx.is_unreachable() {
+        Lifetime::Start.call(bcx, ptr);
     }
 }
 
-pub fn call_lifetime_end(bcx: Block, ptr: ValueRef) {
-    if !bcx.unreachable.get() {
-        Lifetime::End.call(&bcx.build(), ptr);
+pub fn call_lifetime_end(bcx: &BlockAndBuilder, ptr: ValueRef) {
+    if !bcx.is_unreachable() {
+        Lifetime::End.call(bcx, ptr);
     }
 }
 
 // Generates code for resumption of unwind at the end of a landing pad.
-pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) {
+pub fn trans_unwind_resume(bcx: &BlockAndBuilder, lpval: ValueRef) {
     if !bcx.sess().target.target.options.custom_unwind_resume {
-        Resume(bcx, lpval);
+        bcx.resume(lpval);
     } else {
         let exc_ptr = ExtractValue(bcx, lpval, 0);
-        bcx.fcx.eh_unwind_resume()
-            .call(bcx, DebugLoc::None, &[exc_ptr], None);
+        Call(bcx, bcx.fcx().eh_unwind_resume().reify(bcx.ccx()), &[exc_ptr], DebugLoc::None);
     }
 }
@@ -752,11 +743,11 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
     b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }
 
-pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
+pub fn memcpy_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
     let _icx = push_ctxt("memcpy_ty");
     let ccx = bcx.ccx();
 
-    if type_is_zero_size(ccx, t) || bcx.unreachable.get() {
+    if type_is_zero_size(ccx, t) || bcx.is_unreachable() {
         return;
     }
@@ -764,7 +755,7 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe
         let llty = type_of::type_of(ccx, t);
         let llsz = llsize_of(ccx, llty);
         let llalign = type_of::align_of(ccx, t);
-        call_memcpy(&B(bcx), dst, src, llsz, llalign as u32);
+        call_memcpy(bcx, dst, src, llsz, llalign as u32);
     } else if common::type_is_fat_ptr(bcx.tcx(), t) {
         let (data, extra) = load_fat_ptr(bcx, src, t);
         store_fat_ptr(bcx, data, extra, dst, t);
@@ -773,13 +764,13 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe
     }
 }
 
-pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
-    if cx.unreachable.get() {
+pub fn init_zero_mem<'blk, 'tcx>(cx: &BlockAndBuilder<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
+    if cx.is_unreachable() {
         return;
     }
     let _icx = push_ctxt("init_zero_mem");
     let bcx = cx;
-    memfill(&B(bcx), llptr, t, 0);
+    memfill(bcx, llptr, t, 0);
 }
 
 // Always use this function instead of storing a constant byte to the memory
@@ -812,24 +803,17 @@ pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
     b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
 }
 
-pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+pub fn alloc_ty<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                             ty: Ty<'tcx>,
                             name: &str) -> ValueRef {
     assert!(!ty.has_param_types());
     alloca(bcx, type_of::type_of(bcx.ccx(), ty), name)
 }
 
-pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
+pub fn alloca(cx: &BlockAndBuilder, ty: Type, name: &str) -> ValueRef {
     let _icx = push_ctxt("alloca");
-    if cx.unreachable.get() {
-        unsafe {
-            return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
-        }
-    }
-    DebugLoc::None.apply(cx.fcx);
-    let result = Alloca(cx, ty, name);
-    debug!("alloca({:?}) = {:?}", name, result);
-    result
+    DebugLoc::None.apply(cx.fcx());
+    Alloca(cx, ty, name)
 }
 impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
@@ -894,14 +878,14 @@ pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
 
     /// Performs setup on a newly created function, creating the entry
     /// scope block and allocating space for the return pointer.
-    pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> {
-        let entry_bcx = self.new_block("entry-block");
+    pub fn init(&'blk self, skip_retptr: bool) -> BlockAndBuilder<'blk, 'tcx> {
+        let entry_bcx = self.new_block("entry-block").build();
 
         // Use a dummy instruction as the insertion point for all allocas.
         // This is later removed in FunctionContext::cleanup.
         self.alloca_insert_pt.set(Some(unsafe {
-            Load(entry_bcx, C_null(Type::i8p(self.ccx)));
-            llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
+            Load(&entry_bcx, C_null(Type::i8p(self.ccx)));
+            llvm::LLVMGetFirstInstruction(entry_bcx.llbb())
         }));
 
         if !self.fn_ty.ret.is_ignore() && !skip_retptr {
@@ -929,7 +913,7 @@ pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> {
 
     /// Ties up the llstaticallocas -> llloadenv -> lltop edges,
     /// and builds the return block.
-    pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>,
+    pub fn finish(&'blk self, ret_cx: &BlockAndBuilder<'blk, 'tcx>,
                   ret_debug_loc: DebugLoc) {
         let _icx = push_ctxt("FunctionContext::finish");
@@ -940,10 +924,9 @@ pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>,
     }
 
     // Builds the return block for a function.
-    pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>,
+    pub fn build_return_block(&self, ret_cx: &BlockAndBuilder<'blk, 'tcx>,
                               ret_debug_location: DebugLoc) {
-        if self.llretslotptr.get().is_none() ||
-           ret_cx.unreachable.get() ||
+        if self.llretslotptr.get().is_none() || ret_cx.is_unreachable() ||
            self.fn_ty.ret.is_indirect() {
             return RetVoid(ret_cx, ret_debug_location);
         }
@@ -978,7 +961,7 @@ pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>,
                 assert_eq!(cast_ty, None);
                 let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty);
                 let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
-                call_memcpy(&B(ret_cx), get_param(self.llfn, 0),
+                call_memcpy(&ret_cx, get_param(self.llfn, 0),
                             retslot, llsz, llalign as u32);
                 RetVoid(ret_cx, ret_debug_location)
             }
@@ -1080,23 +1063,22 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
         let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
         let mut arg_idx = 0;
         for (i, arg_ty) in sig.inputs().iter().enumerate() {
-            let lldestptr = adt::trans_field_ptr(bcx, sig.output(), dest_val, Disr::from(disr), i);
+            let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i);
             let arg = &fcx.fn_ty.args[arg_idx];
             arg_idx += 1;
-            let b = &bcx.build();
             if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
                 let meta = &fcx.fn_ty.args[arg_idx];
                 arg_idx += 1;
-                arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr));
-                meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr));
+                arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr));
+                meta.store_fn_arg(&bcx, &mut llarg_idx, get_meta(&bcx, lldestptr));
             } else {
-                arg.store_fn_arg(b, &mut llarg_idx, lldestptr);
+                arg.store_fn_arg(&bcx, &mut llarg_idx, lldestptr);
             }
         }
-        adt::trans_set_discr(bcx, sig.output(), dest, disr);
+        adt::trans_set_discr(&bcx, sig.output(), dest, disr);
     }
 
-    fcx.finish(bcx, DebugLoc::None);
+    fcx.finish(&bcx, DebugLoc::None);
 }
 
 pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
...
(The diff for one file is collapsed and not shown.)
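The callee.rs hunks below retire the two-field `common::Result { bcx, val }` in favor of a `(BlockAndBuilder, ValueRef)` tuple, with the block threaded by value through `Callee::call` and `trans_call_inner`. A hedged toy sketch (stand-in types, not rustc's) of that threading, and of why the commit also marks the block types `#[must_use]`: dropping the returned block would silently lose the current insertion point.

    type ValueRef = i64; // placeholder

    #[must_use] // mirrors the attribute added to BlockS/BlockAndBuilder
    struct BlockAndBuilder {
        unreachable: bool,
    }

    // Stand-in for Callee::call: emit a call, then hand the block back.
    fn call(bcx: BlockAndBuilder, args: &[ValueRef]) -> (BlockAndBuilder, ValueRef) {
        let ret = args.iter().sum();
        (bcx, ret)
    }

    fn shim(bcx: BlockAndBuilder) -> BlockAndBuilder {
        // old: bcx = callee.call(bcx, ...).bcx;   (Result struct)
        // new: let bcx = callee.call(bcx, ...).0; (tuple, rebinding bcx)
        let (bcx, _ret) = call(bcx, &[1, 2, 3]);
        bcx
    }

    fn main() {
        let bcx = BlockAndBuilder { unreachable: false };
        let bcx = shim(bcx);
        assert!(!bcx.unreachable);
    }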
@@ -26,7 +26,9 @@
 use base;
 use base::*;
 use build::*;
-use common::{self, Block, Result, CrateContext, FunctionContext, SharedCrateContext};
+use common::{
+    self, Block, BlockAndBuilder, CrateContext, FunctionContext, SharedCrateContext
+};
 use consts;
 use debuginfo::DebugLoc;
 use declare;
@@ -207,11 +209,11 @@ pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
     /// For non-lang items, `dest` is always Some, and hence the result is written
     /// into memory somewhere. Nonetheless we return the actual return value of the
     /// function.
-    pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>,
+    pub fn call<'a, 'blk>(self, bcx: BlockAndBuilder<'blk, 'tcx>,
                           debug_loc: DebugLoc,
                           args: &[ValueRef],
                           dest: Option<ValueRef>)
-                          -> Result<'blk, 'tcx> {
+                          -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) {
         trans_call_inner(bcx, debug_loc, self, args, dest)
     }
@@ -370,8 +372,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let (block_arena, fcx): (TypedArena<_>, FunctionContext);
     block_arena = TypedArena::new();
     fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena);
-    let mut bcx = fcx.init(false);
+    let bcx = fcx.init(false);
 
     // the first argument (`self`) will be the (by value) closure env.
@@ -381,9 +382,9 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let llenv = if env_arg.is_indirect() {
         llargs[self_idx]
     } else {
-        let scratch = alloc_ty(bcx, closure_ty, "self");
+        let scratch = alloc_ty(&bcx, closure_ty, "self");
         let mut llarg_idx = self_idx;
-        env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch);
+        env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
         scratch
     };
@@ -413,11 +414,11 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
     let self_scope = fcx.push_custom_cleanup_scope();
     fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
 
-    bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx;
+    let bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).0;
 
-    fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
+    let bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
 
-    fcx.finish(bcx, DebugLoc::None);
+    fcx.finish(&bcx, DebugLoc::None);
 
     ccx.instances().borrow_mut().insert(method_instance, lloncefn);
@@ -522,7 +523,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
     let (block_arena, fcx): (TypedArena<_>, FunctionContext);
     block_arena = TypedArena::new();
     fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
-    let mut bcx = fcx.init(false);
+    let bcx = fcx.init(false);
 
     let llargs = get_params(fcx.llfn);
@@ -530,7 +531,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
     let llfnpointer = llfnpointer.unwrap_or_else(|| {
         // the first argument (`self`) will be ptr to the fn pointer
         if is_by_ref {
-            Load(bcx, llargs[self_idx])
+            Load(&bcx, llargs[self_idx])
         } else {
             llargs[self_idx]
         }
@@ -542,9 +543,8 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
         data: Fn(llfnpointer),
         ty: bare_fn_ty
     };
-    bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx;
-
-    fcx.finish(bcx, DebugLoc::None);
+    let bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).0;
+    fcx.finish(&bcx, DebugLoc::None);
 
     ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);
@@ -653,12 +653,12 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 // ______________________________________________________________________
 // Translating calls
 
-fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
+fn trans_call_inner<'a, 'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
                                     debug_loc: DebugLoc,
                                     callee: Callee<'tcx>,
                                     args: &[ValueRef],
                                     opt_llretslot: Option<ValueRef>)
-                                    -> Result<'blk, 'tcx> {
+                                    -> (BlockAndBuilder<'blk, 'tcx>, ValueRef) {
     // Introduce a temporary cleanup scope that will contain cleanups
     // for the arguments while they are being evaluated. The purpose
     // this cleanup is to ensure that, should a panic occur while
@@ -666,7 +666,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     // cleaned up. If no panic occurs, the values are handed off to
     // the callee, and hence none of the cleanups in this temporary
     // scope will ever execute.
-    let fcx = bcx.fcx;
+    let fcx = &bcx.fcx();
     let ccx = fcx.ccx;
 
     let fn_ret = callee.ty.fn_ret();
@@ -689,7 +689,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     if fn_ty.ret.is_indirect() {
         let mut llretslot = opt_llretslot.unwrap();
         if let Some(ty) = fn_ty.ret.cast {
-            llretslot = PointerCast(bcx, llretslot, ty.ptr_to());
+            llretslot = PointerCast(&bcx, llretslot, ty.ptr_to());
         }
         llargs.push(llretslot);
     }
@@ -698,9 +698,9 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
         Virtual(idx) => {
             llargs.push(args[0]);
 
-            let fn_ptr = meth::get_virtual_method(bcx, args[1], idx);
-            let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
-            callee = Fn(PointerCast(bcx, fn_ptr, llty));
+            let fn_ptr = meth::get_virtual_method(&bcx, args[1], idx);
+            let llty = fn_ty.llvm_type(&bcx.ccx()).ptr_to();
+            callee = Fn(PointerCast(&bcx, fn_ptr, llty));
             llargs.extend_from_slice(&args[2..]);
         }
         _ => llargs.extend_from_slice(args)
@@ -712,7 +712,7 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
     };
 
     let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
-    if !bcx.unreachable.get() {
+    if !bcx.is_unreachable() {
         fn_ty.apply_attrs_callsite(llret);
 
         // If the function we just called does not use an outpointer,
@@ -722,14 +722,16 @@ fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
         // u64.
         if !fn_ty.ret.is_indirect() {
             if let Some(llretslot) = opt_llretslot {
-                fn_ty.ret.store(&bcx.build(), llret, llretslot);
+                fn_ty.ret.store(&bcx, llret, llretslot);
             }
         }
     }
 
     if fn_ret.0.is_never() {
-        Unreachable(bcx);
+        assert!(!bcx.is_terminated());
+        bcx.set_unreachable();
+        bcx.unreachable();
    }
 
-    Result::new(bcx, llret)
+    (bcx, llret)
 }
@@ -120,7 +120,7 @@
 use base;
 use build;
 use common;
-use common::{Block, FunctionContext, LandingPad};
+use common::{BlockAndBuilder, FunctionContext, LandingPad};
 use debuginfo::{DebugLoc};
 use glue;
 use type_::Type;
@@ -190,9 +190,9 @@ pub fn pop_custom_cleanup_scope(&self,
     /// Removes the top cleanup scope from the stack, which must be a temporary scope, and
     /// generates the code to do its cleanups for normal exit.
     pub fn pop_and_trans_custom_cleanup_scope(&self,
-                                              bcx: Block<'blk, 'tcx>,
+                                              bcx: BlockAndBuilder<'blk, 'tcx>,
                                               custom_scope: CustomScopeIndex)
-                                              -> Block<'blk, 'tcx> {
+                                              -> BlockAndBuilder<'blk, 'tcx> {
         debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope);
         assert!(self.is_valid_to_pop_custom_scope(custom_scope));
@@ -339,11 +339,11 @@ fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool {
     /// Generates the cleanups for `scope` into `bcx`
     fn trans_scope_cleanups(&self, // cannot borrow self, will recurse
-                            bcx: Block<'blk, 'tcx>,
-                            scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> {
+                            bcx: BlockAndBuilder<'blk, 'tcx>,
+                            scope: &CleanupScope<'tcx>) -> BlockAndBuilder<'blk, 'tcx> {
         let mut bcx = bcx;
-        if !bcx.unreachable.get() {
+        if !bcx.is_unreachable() {
             for cleanup in scope.cleanups.iter().rev() {
                 bcx = cleanup.trans(bcx, scope.debug_loc);
             }
@@ -419,21 +419,21 @@ fn trans_cleanups_to_exit_scope(&'blk self,
                 UnwindExit(val) => {
                     // Generate a block that will resume unwinding to the
                     // calling function
-                    let bcx = self.new_block("resume");
+                    let bcx = self.new_block("resume").build();
                     match val {
                         UnwindKind::LandingPad => {
                             let addr = self.landingpad_alloca.get()
                                            .unwrap();
-                            let lp = build::Load(bcx, addr);
-                            base::call_lifetime_end(bcx, addr);
-                            base::trans_unwind_resume(bcx, lp);
+                            let lp = build::Load(&bcx, addr);
+                            base::call_lifetime_end(&bcx, addr);
+                            base::trans_unwind_resume(&bcx, lp);
                         }
                         UnwindKind::CleanupPad(_) => {
-                            let pad = build::CleanupPad(bcx, None, &[]);
-                            build::CleanupRet(bcx, pad, None);
+                            let pad = build::CleanupPad(&bcx, None, &[]);
+                            build::CleanupRet(&bcx, pad, None);
                         }
                     }
-                    prev_llbb = bcx.llbb;
+                    prev_llbb = bcx.llbb();
                    break;
                }
            }
@@ -484,16 +484,17 @@ fn trans_cleanups_to_exit_scope(&'blk self,
                 let name = scope.block_name("clean");
                 debug!("generating cleanups for {}", name);
 
-                let bcx_in = self.new_block(&name[..]);
-                let exit_label = label.start(bcx_in);
+                let bcx_in = self.new_block(&name[..]).build();
+                let exit_label = label.start(&bcx_in);
+                let next_llbb = bcx_in.llbb();
                 let mut bcx_out = bcx_in;
                 let len = scope.cleanups.len();
                 for cleanup in scope.cleanups.iter().rev().take(len - skip) {
                     bcx_out = cleanup.trans(bcx_out, scope.debug_loc);
                 }
                 skip = 0;
-                exit_label.branch(bcx_out, prev_llbb);
-                prev_llbb = bcx_in.llbb;
+                exit_label.branch(&bcx_out, prev_llbb);
+                prev_llbb = next_llbb;
 
                 scope.add_cached_early_exit(exit_label, prev_llbb, len);
             }
@@ -527,13 +528,13 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
                 Some(llbb) => return llbb,
                 None => {
                     let name = last_scope.block_name("unwind");
-                    pad_bcx = self.new_block(&name[..]);
-                    last_scope.cached_landing_pad = Some(pad_bcx.llbb);
+                    pad_bcx = self.new_block(&name[..]).build();
+                    last_scope.cached_landing_pad = Some(pad_bcx.llbb());
                 }
            }
        };
 
-        let llpersonality = pad_bcx.fcx.eh_personality();
+        let llpersonality = pad_bcx.fcx().eh_personality();
 
        let val = if base::wants_msvc_seh(self.ccx.sess()) {
            // A cleanup pad requires a personality function to be specified, so
@@ -541,8 +542,8 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
            // creation of the landingpad instruction). We then create a
            // cleanuppad instruction which has no filters to run cleanup on all
            // exceptions.
-            build::SetPersonalityFn(pad_bcx, llpersonality);
-            let llretval = build::CleanupPad(pad_bcx, None, &[]);
+            build::SetPersonalityFn(&pad_bcx, llpersonality);
+            let llretval = build::CleanupPad(&pad_bcx, None, &[]);
            UnwindKind::CleanupPad(llretval)
        } else {
            // The landing pad return type (the type being propagated). Not sure
@@ -553,31 +554,31 @@ fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef {
                                false);
 
            // The only landing pad clause will be 'cleanup'
-            let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1);
+            let llretval = build::LandingPad(&pad_bcx, llretty, llpersonality, 1);
 
            // The landing pad block is a cleanup
-            build::SetCleanup(pad_bcx, llretval);
+            build::SetCleanup(&pad_bcx, llretval);
 
            let addr = match self.landingpad_alloca.get() {
                Some(addr) => addr,
                None => {
-                    let addr = base::alloca(pad_bcx, common::val_ty(llretval),
+                    let addr = base::alloca(&pad_bcx, common::val_ty(llretval),
                                            "");
-                    base::call_lifetime_start(pad_bcx, addr);
+                    base::call_lifetime_start(&pad_bcx, addr);
                    self.landingpad_alloca.set(Some(addr));
                    addr
                }
            };
-            build::Store(pad_bcx, llretval, addr);
+            build::Store(&pad_bcx, llretval, addr);
            UnwindKind::LandingPad
        };
 
        // Generate the cleanup block and branch to it.
        let label = UnwindExit(val);
        let cleanup_llbb = self.trans_cleanups_to_exit_scope(label);
-        label.branch(pad_bcx, cleanup_llbb);
+        label.branch(&pad_bcx, cleanup_llbb);
 
-        return pad_bcx.llbb;
+        return pad_bcx.llbb();
    }
 }
@@ -628,7 +629,7 @@ impl EarlyExitLabel {
     /// Transitions from an exit label to other exit labels depend on the type
     /// of label. For example with MSVC exceptions unwind exit labels will use
     /// the `cleanupret` instruction instead of the `br` instruction.
-    fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) {
+    fn branch(&self, from_bcx: &BlockAndBuilder, to_llbb: BasicBlockRef) {
         if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self {
             build::CleanupRet(from_bcx, pad, Some(to_llbb));
         } else {
@@ -647,15 +648,15 @@ fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) {
     ///
     /// Returns a new label which will can be used to cache `bcx` in the list of
     /// early exits.
-    fn start(&self, bcx: Block) -> EarlyExitLabel {
+    fn start(&self, bcx: &BlockAndBuilder) -> EarlyExitLabel {
         match *self {
             UnwindExit(UnwindKind::CleanupPad(..)) => {
                 let pad = build::CleanupPad(bcx, None, &[]);
-                bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::msvc(pad))));
+                bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::msvc(pad))));
                 UnwindExit(UnwindKind::CleanupPad(pad))
             }
             UnwindExit(UnwindKind::LandingPad) => {
-                bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu())));
+                bcx.set_lpad_ref(Some(bcx.fcx().lpad_arena.alloc(LandingPad::gnu())));
                 *self
             }
         }
@@ -685,20 +686,19 @@ pub struct DropValue<'tcx> {
 impl<'tcx> DropValue<'tcx> {
     fn trans<'blk>(&self,
-                   bcx: Block<'blk, 'tcx>,
+                   bcx: BlockAndBuilder<'blk, 'tcx>,
                    debug_loc: DebugLoc)
-                   -> Block<'blk, 'tcx> {
+                   -> BlockAndBuilder<'blk, 'tcx> {
         let skip_dtor = self.skip_dtor;
         let _icx = if skip_dtor {
             base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=true")
         } else {
             base::push_ctxt("<DropValue as Cleanup>::trans skip_dtor=false")
         };
-        let bcx = if self.is_immediate {
+        if self.is_immediate {
             glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
         } else {
             glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor)
-        };
-        bcx
+        }
     }
 }
...@@ -441,6 +441,7 @@ pub fn eh_unwind_resume(&self) -> Callee<'tcx> { ...@@ -441,6 +441,7 @@ pub fn eh_unwind_resume(&self) -> Callee<'tcx> {
// code. Each basic block we generate is attached to a function, typically // code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a // with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph. // function are organized as a directed graph.
#[must_use]
pub struct BlockS<'blk, 'tcx: 'blk> { pub struct BlockS<'blk, 'tcx: 'blk> {
// The BasicBlockRef returned from a call to // The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic
...@@ -555,6 +556,7 @@ fn drop(&mut self) { ...@@ -555,6 +556,7 @@ fn drop(&mut self) {
} }
} }
#[must_use]
pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { pub struct BlockAndBuilder<'blk, 'tcx: 'blk> {
bcx: Block<'blk, 'tcx>, bcx: Block<'blk, 'tcx>,
owned_builder: OwnedBuilder<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>,
...@@ -597,10 +599,24 @@ pub fn at_start<F, R>(&self, f: F) -> R ...@@ -597,10 +599,24 @@ pub fn at_start<F, R>(&self, f: F) -> R
// Methods delegated to bcx // Methods delegated to bcx
pub fn terminate(&self) {
debug!("terminate({})", self.bcx.to_str());
self.bcx.terminated.set(true);
}
pub fn set_unreachable(&self) {
debug!("set_unreachable({})", self.bcx.to_str());
self.bcx.unreachable.set(true);
}
pub fn is_unreachable(&self) -> bool { pub fn is_unreachable(&self) -> bool {
self.bcx.unreachable.get() self.bcx.unreachable.get()
} }
pub fn is_terminated(&self) -> bool {
self.bcx.terminated.get()
}
pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
self.bcx.ccx() self.bcx.ccx()
} }
...@@ -700,20 +716,6 @@ fn clone(&self) -> LandingPad { ...@@ -700,20 +716,6 @@ fn clone(&self) -> LandingPad {
} }
} }
pub struct Result<'blk, 'tcx: 'blk> {
pub bcx: Block<'blk, 'tcx>,
pub val: ValueRef
}
impl<'b, 'tcx> Result<'b, 'tcx> {
pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> {
Result {
bcx: bcx,
val: val,
}
}
}
pub fn val_ty(v: ValueRef) -> Type { pub fn val_ty(v: ValueRef) -> Type {
unsafe { unsafe {
Type::from_ref(llvm::LLVMTypeOf(v)) Type::from_ref(llvm::LLVMTypeOf(v))
...@@ -1016,7 +1018,7 @@ pub fn langcall(tcx: TyCtxt, ...@@ -1016,7 +1018,7 @@ pub fn langcall(tcx: TyCtxt,
// all shifts). For 32- and 64-bit types, this matches the semantics // all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.) // of Java. (See related discussion on #1877 and #10183.)
pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
lhs: ValueRef, lhs: ValueRef,
rhs: ValueRef, rhs: ValueRef,
binop_debug_loc: DebugLoc) -> ValueRef { binop_debug_loc: DebugLoc) -> ValueRef {
...@@ -1026,7 +1028,7 @@ pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -1026,7 +1028,7 @@ pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
build::Shl(bcx, lhs, rhs, binop_debug_loc) build::Shl(bcx, lhs, rhs, binop_debug_loc)
} }
pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
lhs_t: Ty<'tcx>, lhs_t: Ty<'tcx>,
lhs: ValueRef, lhs: ValueRef,
rhs: ValueRef, rhs: ValueRef,
...@@ -1042,17 +1044,19 @@ pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -1042,17 +1044,19 @@ pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} }
} }
fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn shift_mask_rhs<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
rhs: ValueRef, rhs: ValueRef,
debug_loc: DebugLoc) -> ValueRef { debug_loc: DebugLoc) -> ValueRef {
let rhs_llty = val_ty(rhs); let rhs_llty = val_ty(rhs);
build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc) build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc)
} }
pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn shift_mask_val<'blk, 'tcx>(
llty: Type, bcx: &BlockAndBuilder<'blk, 'tcx>,
mask_llty: Type, llty: Type,
invert: bool) -> ValueRef { mask_llty: Type,
invert: bool
) -> ValueRef {
let kind = llty.kind(); let kind = llty.kind();
match kind { match kind {
TypeKind::Integer => { TypeKind::Integer => {
......
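The masking above keeps every shift amount in range by ANDing it with the bit width minus one, which is what produces the Java-like semantics the comment mentions. A small, self-contained illustration in plain Rust:

    fn masked_shl(lhs: u32, rhs: u32) -> u32 {
        let mask = u32::BITS - 1; // 31 for a 32-bit operand
        lhs << (rhs & mask)       // shift amount taken modulo the bit width
    }

    fn main() {
        // 33 & 31 == 1, so an oversized shift wraps instead of being undefined.
        assert_eq!(masked_shl(1, 33), 2);
    }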
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
use rustc::ty::subst::Substs; use rustc::ty::subst::Substs;
use abi::Abi; use abi::Abi;
use common::{CrateContext, FunctionContext, Block, BlockAndBuilder}; use common::{CrateContext, FunctionContext, BlockAndBuilder};
use monomorphize::{self, Instance}; use monomorphize::{self, Instance};
use rustc::ty::{self, Ty}; use rustc::ty::{self, Ty};
use rustc::mir; use rustc::mir;
...@@ -441,7 +441,7 @@ fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>, ...@@ -441,7 +441,7 @@ fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>,
} }
} }
pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn declare_local<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
variable_name: ast::Name, variable_name: ast::Name,
variable_type: Ty<'tcx>, variable_type: Ty<'tcx>,
scope_metadata: DIScope, scope_metadata: DIScope,
...@@ -494,16 +494,16 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -494,16 +494,16 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
address_operations.as_ptr(), address_operations.as_ptr(),
address_operations.len() as c_uint, address_operations.len() as c_uint,
debug_loc, debug_loc,
bcx.llbb); bcx.llbb());
llvm::LLVMSetInstDebugLocation(::build::B(bcx).llbuilder, instr); llvm::LLVMSetInstDebugLocation(bcx.llbuilder, instr);
} }
} }
} }
match variable_kind { match variable_kind {
ArgumentVariable(_) | CapturedVariable => { ArgumentVariable(_) | CapturedVariable => {
assert!(!bcx.fcx assert!(!bcx.fcx()
.debug_context .debug_context
.get_ref(span) .get_ref(span)
.source_locations_enabled .source_locations_enabled
......
...@@ -38,38 +38,39 @@ ...@@ -38,38 +38,39 @@
use arena::TypedArena; use arena::TypedArena;
use syntax_pos::DUMMY_SP; use syntax_pos::DUMMY_SP;
pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef, v: ValueRef,
size: ValueRef, size: ValueRef,
align: ValueRef, align: ValueRef,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free"); let _icx = push_ctxt("trans_exchange_free");
let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem);
let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align]; let args = [PointerCast(&bcx, v, Type::i8p(bcx.ccx())), size, align];
Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
.call(bcx, debug_loc, &args, None).bcx .call(bcx, debug_loc, &args, None).0
} }
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, pub fn trans_exchange_free<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef, v: ValueRef,
size: u64, size: u64,
align: u32, align: u32,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
let ccx = cx.ccx();
trans_exchange_free_dyn(cx, trans_exchange_free_dyn(cx,
v, v,
C_uint(cx.ccx(), size), C_uint(ccx, size),
C_uint(cx.ccx(), align), C_uint(ccx, align),
debug_loc) debug_loc)
} }
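trans_exchange_free_dyn ultimately performs a deallocation parameterized by pointer, size, and alignment. The same triple appears in today's std::alloc API; a minimal illustration:

    use std::alloc::{alloc, dealloc, Layout};

    fn main() {
        // Size and alignment travel alongside the pointer, matching the
        // three arguments handed to the exchange-free lang item above.
        let layout = Layout::from_size_align(16, 8).unwrap();
        unsafe {
            let p = alloc(layout);
            assert!(!p.is_null());
            dealloc(p, layout);
        }
    }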
pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
ptr: ValueRef, ptr: ValueRef,
content_ty: Ty<'tcx>, content_ty: Ty<'tcx>,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); assert!(type_is_sized(bcx.ccx().tcx(), content_ty));
let sizing_type = sizing_type_of(bcx.ccx(), content_ty); let sizing_type = sizing_type_of(bcx.ccx(), content_ty);
let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); let content_size = llsize_of_alloc(bcx.ccx(), sizing_type);
...@@ -129,23 +130,23 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ...@@ -129,23 +130,23 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
} }
} }
pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn drop_ty<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef, v: ValueRef,
t: Ty<'tcx>, t: Ty<'tcx>,
debug_loc: DebugLoc) -> Block<'blk, 'tcx> { debug_loc: DebugLoc) -> BlockAndBuilder<'blk, 'tcx> {
drop_ty_core(bcx, v, t, debug_loc, false) drop_ty_core(bcx, v, t, debug_loc, false)
} }
pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn drop_ty_core<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef, v: ValueRef,
t: Ty<'tcx>, t: Ty<'tcx>,
debug_loc: DebugLoc, debug_loc: DebugLoc,
skip_dtor: bool) skip_dtor: bool)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
// NB: v is an *alias* of type t here, not a direct value. // NB: v is an *alias* of type t here, not a direct value.
debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor); debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor);
let _icx = push_ctxt("drop_ty"); let _icx = push_ctxt("drop_ty");
if bcx.fcx.type_needs_drop(t) { if bcx.fcx().type_needs_drop(t) {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
let g = if skip_dtor { let g = if skip_dtor {
DropGlueKind::TyContents(t) DropGlueKind::TyContents(t)
...@@ -155,29 +156,29 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -155,29 +156,29 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let glue = get_drop_glue_core(ccx, g); let glue = get_drop_glue_core(ccx, g);
let glue_type = get_drop_glue_type(ccx.tcx(), t); let glue_type = get_drop_glue_type(ccx.tcx(), t);
let ptr = if glue_type != t { let ptr = if glue_type != t {
PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to()) PointerCast(&bcx, v, type_of(ccx, glue_type).ptr_to())
} else { } else {
v v
}; };
// No drop-hint ==> call standard drop glue // No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], debug_loc); Call(&bcx, glue, &[ptr], debug_loc);
} }
bcx bcx
} }
pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn drop_ty_immediate<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v: ValueRef, v: ValueRef,
t: Ty<'tcx>, t: Ty<'tcx>,
debug_loc: DebugLoc, debug_loc: DebugLoc,
skip_dtor: bool) skip_dtor: bool)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("drop_ty_immediate"); let _icx = push_ctxt("drop_ty_immediate");
let vp = alloc_ty(bcx, t, ""); let vp = alloc_ty(&bcx, t, "");
call_lifetime_start(bcx, vp); call_lifetime_start(&bcx, vp);
store_ty(bcx, v, vp, t); store_ty(&bcx, v, vp, t);
let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor); let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor);
call_lifetime_end(bcx, vp); call_lifetime_end(&bcx, vp);
bcx bcx
} }
...@@ -248,14 +249,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ...@@ -248,14 +249,14 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// type, so we don't need to explicitly cast the function parameter. // type, so we don't need to explicitly cast the function parameter.
let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
fcx.finish(bcx, DebugLoc::None); fcx.finish(&bcx, DebugLoc::None);
} }
fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn trans_custom_dtor<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>, t: Ty<'tcx>,
v0: ValueRef, v0: ValueRef,
shallow_drop: bool) shallow_drop: bool)
-> Block<'blk, 'tcx> -> BlockAndBuilder<'blk, 'tcx>
{ {
debug!("trans_custom_dtor t: {}", t); debug!("trans_custom_dtor t: {}", t);
let tcx = bcx.tcx(); let tcx = bcx.tcx();
...@@ -269,12 +270,12 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -269,12 +270,12 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// //
// FIXME (#14875) panic-in-drop semantics might be unsupported; we // FIXME (#14875) panic-in-drop semantics might be unsupported; we
// might well consider changing below to more direct code. // might well consider changing below to more direct code.
let contents_scope = bcx.fcx.push_custom_cleanup_scope(); let contents_scope = bcx.fcx().push_custom_cleanup_scope();
// Issue #23611: schedule cleanup of contents, re-inspecting the // Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code. // discriminant (if any) in case of variant swap in drop code.
if !shallow_drop { if !shallow_drop {
bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t); bcx.fcx().schedule_drop_adt_contents(contents_scope, v0, t);
} }
let (sized_args, unsized_args); let (sized_args, unsized_args);
...@@ -284,8 +285,8 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -284,8 +285,8 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} else { } else {
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
unsized_args = [ unsized_args = [
Load(bcx, get_dataptr(bcx, v0)), Load(&bcx, get_dataptr(&bcx, v0)),
Load(bcx, get_meta(bcx, v0)) Load(&bcx, get_meta(&bcx, v0))
]; ];
&unsized_args &unsized_args
}; };
...@@ -300,9 +301,9 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -300,9 +301,9 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
}; };
let dtor_did = def.destructor().unwrap(); let dtor_did = def.destructor().unwrap();
bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
.call(bcx, DebugLoc::None, args, None).bcx; .call(bcx, DebugLoc::None, args, None).0;
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) bcx.fcx().pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
} }
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
...@@ -416,10 +417,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, ...@@ -416,10 +417,10 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
} }
} }
fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn make_drop_glue<'blk, 'tcx>(bcx: BlockAndBuilder<'blk, 'tcx>,
v0: ValueRef, v0: ValueRef,
g: DropGlueKind<'tcx>) g: DropGlueKind<'tcx>)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
let t = g.ty(); let t = g.ty();
let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true };
...@@ -438,27 +439,28 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -438,27 +439,28 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// a safe-guard, assert TyBox not used with TyContents. // a safe-guard, assert TyBox not used with TyContents.
assert!(!skip_dtor); assert!(!skip_dtor);
if !type_is_sized(bcx.tcx(), content_ty) { if !type_is_sized(bcx.tcx(), content_ty) {
let llval = get_dataptr(bcx, v0); let llval = get_dataptr(&bcx, v0);
let llbox = Load(bcx, llval); let llbox = Load(&bcx, llval);
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
let info = get_meta(bcx, v0); let info = get_meta(&bcx, v0);
let info = Load(bcx, info); let info = Load(&bcx, info);
let (llsize, llalign) = let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info);
size_and_align_of_dst(&bcx.build(), content_ty, info);
// `Box<ZeroSizeType>` does not allocate. // `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx, let needs_free = ICmp(
llvm::IntNE, &bcx,
llsize, llvm::IntNE,
C_uint(bcx.ccx(), 0u64), llsize,
DebugLoc::None); C_uint(bcx.ccx(), 0u64),
DebugLoc::None
);
with_cond(bcx, needs_free, |bcx| { with_cond(bcx, needs_free, |bcx| {
trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None)
}) })
} else { } else {
let llval = v0; let llval = v0;
let llbox = Load(bcx, llval); let llbox = Load(&bcx, llval);
let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None);
trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None)
} }
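The boxed-drop sequence above is: drop the contents first, then free the allocation only when the contents have nonzero size, because Box<ZeroSizeType> never allocates. The same logic for a sized payload, in plain Rust:

    use std::alloc::{dealloc, Layout};
    use std::mem::size_of_val;
    use std::ptr::drop_in_place;

    fn main() {
        let p = Box::into_raw(Box::new(String::from("boxed")));
        unsafe {
            let size = size_of_val(&*p);
            drop_in_place(p); // run the contents' destructor first
            if size != 0 {
                // Guarded free: a zero-sized payload was never allocated.
                dealloc(p as *mut u8, Layout::new::<String>());
            }
        }
    }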
...@@ -469,12 +471,12 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -469,12 +471,12 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// okay with always calling the Drop impl, if any. // okay with always calling the Drop impl, if any.
// FIXME(#36457) -- we should pass unsized values to drop glue as two arguments // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments
assert!(!skip_dtor); assert!(!skip_dtor);
let data_ptr = get_dataptr(bcx, v0); let data_ptr = get_dataptr(&bcx, v0);
let vtable_ptr = Load(bcx, get_meta(bcx, v0)); let vtable_ptr = Load(&bcx, get_meta(&bcx, v0));
let dtor = Load(bcx, vtable_ptr); let dtor = Load(&bcx, vtable_ptr);
Call(bcx, Call(&bcx,
dtor, dtor,
&[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))], &[PointerCast(&bcx, Load(&bcx, data_ptr), Type::i8p(bcx.ccx()))],
DebugLoc::None); DebugLoc::None);
bcx bcx
} }
...@@ -485,7 +487,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -485,7 +487,7 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
bcx bcx
} }
_ => { _ => {
if bcx.fcx.type_needs_drop(t) { if bcx.fcx().type_needs_drop(t) {
drop_structural_ty(bcx, v0, t) drop_structural_ty(bcx, v0, t)
} else { } else {
bcx bcx
...@@ -495,27 +497,26 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -495,27 +497,26 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} }
// Iterates through the elements of a structural type, dropping them. // Iterates through the elements of a structural type, dropping them.
fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, fn drop_structural_ty<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
av: ValueRef, av: ValueRef,
t: Ty<'tcx>) t: Ty<'tcx>)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("drop_structural_ty"); let _icx = push_ctxt("drop_structural_ty");
fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, fn iter_variant<'blk, 'tcx>(cx: BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>, t: Ty<'tcx>,
av: adt::MaybeSizedValue, av: adt::MaybeSizedValue,
variant: &'tcx ty::VariantDef, variant: &'tcx ty::VariantDef,
substs: &Substs<'tcx>) substs: &Substs<'tcx>)
-> Block<'blk, 'tcx> { -> BlockAndBuilder<'blk, 'tcx> {
let _icx = push_ctxt("iter_variant"); let _icx = push_ctxt("iter_variant");
let tcx = cx.tcx(); let tcx = cx.tcx();
let mut cx = cx; let mut cx = cx;
for (i, field) in variant.fields.iter().enumerate() { for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field); let arg = monomorphize::field_ty(tcx, substs, field);
cx = drop_ty(cx, let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i);
adt::trans_field_ptr(cx, t, av, Disr::from(variant.disr_val), i), cx = drop_ty(cx, field_ptr, arg, DebugLoc::None);
arg, DebugLoc::None);
} }
return cx; return cx;
} }
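iter_variant walks the variant's fields in order and drops each one through a field pointer. Expressed directly in Rust for a plain struct:

    use std::ptr::drop_in_place;

    struct Pair {
        a: String,
        b: Vec<u8>,
    }

    unsafe fn drop_fields(p: *mut Pair) {
        // One in-place drop per field, in declaration order.
        drop_in_place(&mut (*p).a);
        drop_in_place(&mut (*p).b);
    }

    fn main() {
        let mut pair = Pair { a: String::from("a"), b: vec![1, 2, 3] };
        unsafe { drop_fields(&mut pair) };
        std::mem::forget(pair); // fields already dropped; avoid a double drop
    }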
...@@ -524,8 +525,8 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ...@@ -524,8 +525,8 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
adt::MaybeSizedValue::sized(av) adt::MaybeSizedValue::sized(av)
} else { } else {
// FIXME(#36457) -- we should pass unsized values as two arguments // FIXME(#36457) -- we should pass unsized values as two arguments
let data = Load(cx, get_dataptr(cx, av)); let data = Load(&cx, get_dataptr(&cx, av));
let info = Load(cx, get_meta(cx, av)); let info = Load(&cx, get_meta(&cx, av));
adt::MaybeSizedValue::unsized_(data, info) adt::MaybeSizedValue::unsized_(data, info)
}; };
...@@ -533,12 +534,12 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ...@@ -533,12 +534,12 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
match t.sty { match t.sty {
ty::TyClosure(def_id, substs) => { ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let llupvar = adt::trans_field_ptr(cx, t, value, Disr(0), i); let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None); cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None);
} }
} }
ty::TyArray(_, n) => { ty::TyArray(_, n) => {
let base = get_dataptr(cx, value.value); let base = get_dataptr(&cx, value.value);
let len = C_uint(cx.ccx(), n); let len = C_uint(cx.ccx(), n);
let unit_ty = t.sequence_element_type(cx.tcx()); let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(cx, base, unit_ty, len, cx = tvec::slice_for_each(cx, base, unit_ty, len,
...@@ -551,7 +552,7 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ...@@ -551,7 +552,7 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
} }
ty::TyTuple(ref args) => { ty::TyTuple(ref args) => {
for (i, arg) in args.iter().enumerate() { for (i, arg) in args.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, t, value, Disr(0), i); let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i);
cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None); cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None);
} }
} }
...@@ -559,15 +560,15 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ...@@ -559,15 +560,15 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
AdtKind::Struct => { AdtKind::Struct => {
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() { for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
let llfld_a = adt::trans_field_ptr(cx, t, value, Disr::from(discr), i); let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i);
let val = if type_is_sized(cx.tcx(), field_ty) { let val = if type_is_sized(cx.tcx(), field_ty) {
llfld_a llfld_a
} else { } else {
// FIXME(#36457) -- we should pass unsized values as two arguments // FIXME(#36457) -- we should pass unsized values as two arguments
let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter"); let scratch = alloc_ty(&cx, field_ty, "__fat_ptr_iter");
Store(cx, llfld_a, get_dataptr(cx, scratch)); Store(&cx, llfld_a, get_dataptr(&cx, scratch));
Store(cx, value.meta, get_meta(cx, scratch)); Store(&cx, value.meta, get_meta(&cx, scratch));
scratch scratch
}; };
cx = drop_ty(cx, val, field_ty, DebugLoc::None); cx = drop_ty(cx, val, field_ty, DebugLoc::None);
...@@ -577,14 +578,14 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ...@@ -577,14 +578,14 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
bug!("Union in `glue::drop_structural_ty`"); bug!("Union in `glue::drop_structural_ty`");
} }
AdtKind::Enum => { AdtKind::Enum => {
let fcx = cx.fcx; let fcx = cx.fcx();
let ccx = fcx.ccx; let ccx = fcx.ccx;
let n_variants = adt.variants.len(); let n_variants = adt.variants.len();
// NB: we must hit the discriminant first so that structural // NB: we must hit the discriminant first so that structural
// comparisons know not to proceed when the discriminants differ. // comparisons know not to proceed when the discriminants differ.
match adt::trans_switch(cx, t, av, false) { match adt::trans_switch(&cx, t, av, false) {
(adt::BranchKind::Single, None) => { (adt::BranchKind::Single, None) => {
if n_variants != 0 { if n_variants != 0 {
assert!(n_variants == 1); assert!(n_variants == 1);
...@@ -593,7 +594,8 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ...@@ -593,7 +594,8 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
} }
} }
(adt::BranchKind::Switch, Some(lldiscrim_a)) => { (adt::BranchKind::Switch, Some(lldiscrim_a)) => {
cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None); let tcx = cx.tcx();
cx = drop_ty(cx, lldiscrim_a, tcx.types.isize, DebugLoc::None);
// Create a fall-through basic block for the "else" case of // Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that // the switch instruction we're about to generate. Note that
...@@ -608,23 +610,19 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ...@@ -608,23 +610,19 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
// from the outer function, and any other use case will only // from the outer function, and any other use case will only
// call this for an already-valid enum in which case the `ret // call this for an already-valid enum in which case the `ret
// void` will never be hit. // void` will never be hit.
let ret_void_cx = fcx.new_block("enum-iter-ret-void"); let ret_void_cx = fcx.new_block("enum-iter-ret-void").build();
RetVoid(ret_void_cx, DebugLoc::None); RetVoid(&ret_void_cx, DebugLoc::None);
let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); let llswitch = Switch(&cx, lldiscrim_a, ret_void_cx.llbb(), n_variants);
let next_cx = fcx.new_block("enum-iter-next"); let next_cx = fcx.new_block("enum-iter-next").build();
for variant in &adt.variants { for variant in &adt.variants {
let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}", let variant_cx_name = format!("enum-iter-variant-{}",
&variant.disr_val &variant.disr_val.to_string());
.to_string())); let variant_cx = fcx.new_block(&variant_cx_name).build();
let case_val = adt::trans_case(cx, t, Disr::from(variant.disr_val)); let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val));
AddCase(llswitch, case_val, variant_cx.llbb); AddCase(llswitch, case_val, variant_cx.llbb());
let variant_cx = iter_variant(variant_cx, let variant_cx = iter_variant(variant_cx, t, value, variant, substs);
t, Br(&variant_cx, next_cx.llbb(), DebugLoc::None);
value,
variant,
substs);
Br(variant_cx, next_cx.llbb, DebugLoc::None);
} }
cx = next_cx; cx = next_cx;
} }
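For an enum the glue loads the discriminant, switches to a per-variant block that drops that variant's fields, and routes the never-taken default to the ret-void block built above. At the source level the switch corresponds to a match on the live variant:

    use std::ptr::drop_in_place;

    enum Value {
        Text(String),
        Bytes(Vec<u8>),
    }

    unsafe fn drop_value(p: *mut Value) {
        // The emitted switch over the discriminant plays the role of
        // this match; the ret-void block is the arm never taken.
        match &mut *p {
            Value::Text(s) => drop_in_place(s),
            Value::Bytes(b) => drop_in_place(b),
        }
    }

    fn main() {
        let mut v = Value::Text(String::from("hello"));
        unsafe { drop_value(&mut v) };
        std::mem::forget(v);
    }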
......
...@@ -87,14 +87,13 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> { ...@@ -87,14 +87,13 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs /// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
callee_ty: Ty<'tcx>, callee_ty: Ty<'tcx>,
fn_ty: &FnType, fn_ty: &FnType,
llargs: &[ValueRef], llargs: &[ValueRef],
llresult: ValueRef, llresult: ValueRef,
call_debug_location: DebugLoc) call_debug_location: DebugLoc) {
-> Result<'blk, 'tcx> { let fcx = bcx.fcx();
let fcx = bcx.fcx;
let ccx = fcx.ccx; let ccx = fcx.ccx;
let tcx = bcx.tcx(); let tcx = bcx.tcx();
...@@ -122,11 +121,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, ...@@ -122,11 +121,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
if name == "abort" { if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap")); let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], call_debug_location); Call(bcx, llfn, &[], call_debug_location);
Unreachable(bcx); return;
return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
} else if name == "unreachable" { } else if name == "unreachable" {
Unreachable(bcx); // FIXME: do nothing?
return Result::new(bcx, C_nil(ccx)); return;
} }
let llret_ty = type_of::type_of(ccx, ret_ty); let llret_ty = type_of::type_of(ccx, ret_ty);
...@@ -145,8 +143,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, ...@@ -145,8 +143,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location) Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location)
} }
(_, "try") => { (_, "try") => {
bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, call_debug_location);
call_debug_location);
C_nil(ccx) C_nil(ccx)
} }
(_, "breakpoint") => { (_, "breakpoint") => {
...@@ -162,7 +159,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, ...@@ -162,7 +159,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let tp_ty = substs.type_at(0); let tp_ty = substs.type_at(0);
if !type_is_sized(tcx, tp_ty) { if !type_is_sized(tcx, tp_ty) {
let (llsize, _) = let (llsize, _) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
llsize llsize
} else { } else {
let lltp_ty = type_of::type_of(ccx, tp_ty); let lltp_ty = type_of::type_of(ccx, tp_ty);
...@@ -177,7 +174,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, ...@@ -177,7 +174,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let tp_ty = substs.type_at(0); let tp_ty = substs.type_at(0);
if !type_is_sized(tcx, tp_ty) { if !type_is_sized(tcx, tp_ty) {
let (_, llalign) = let (_, llalign) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
llalign llalign
} else { } else {
C_uint(ccx, type_of::align_of(ccx, tp_ty)) C_uint(ccx, type_of::align_of(ccx, tp_ty))
...@@ -188,25 +185,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, ...@@ -188,25 +185,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let lltp_ty = type_of::type_of(ccx, tp_ty); let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)) C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
} }
(_, "drop_in_place") => {
let tp_ty = substs.type_at(0);
let is_sized = type_is_sized(tcx, tp_ty);
let ptr = if is_sized {
llargs[0]
} else {
// FIXME(#36457) -- we should pass unsized values as two arguments
let scratch = alloc_ty(bcx, tp_ty, "drop");
call_lifetime_start(bcx, scratch);
Store(bcx, llargs[0], get_dataptr(bcx, scratch));
Store(bcx, llargs[1], get_meta(bcx, scratch));
scratch
};
glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
if !is_sized {
call_lifetime_end(bcx, ptr);
}
C_nil(ccx)
}
(_, "type_name") => { (_, "type_name") => {
let tp_ty = substs.type_at(0); let tp_ty = substs.type_at(0);
let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
...@@ -230,7 +208,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, ...@@ -230,7 +208,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, "needs_drop") => { (_, "needs_drop") => {
let tp_ty = substs.type_at(0); let tp_ty = substs.type_at(0);
C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty)) C_bool(ccx, bcx.fcx().type_needs_drop(tp_ty))
} }
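The needs_drop intrinsic folds to a per-monomorphization constant; its stable surface in later Rust is std::mem::needs_drop:

    use std::mem::needs_drop;

    fn main() {
        assert!(needs_drop::<String>()); // has drop glue to run
        assert!(!needs_drop::<u32>());   // trivially discardable
    }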
(_, "offset") => { (_, "offset") => {
let ptr = llargs[0]; let ptr = llargs[0];
...@@ -613,7 +591,7 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, ...@@ -613,7 +591,7 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
// qux` to be converted into `foo, bar, baz, qux`, integer // qux` to be converted into `foo, bar, baz, qux`, integer
// arguments to be truncated as needed and pointers to be // arguments to be truncated as needed and pointers to be
// cast. // cast.
fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn modify_as_needed<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
t: &intrinsics::Type, t: &intrinsics::Type,
arg_type: Ty<'tcx>, arg_type: Ty<'tcx>,
llarg: ValueRef) llarg: ValueRef)
...@@ -627,7 +605,7 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -627,7 +605,7 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// This assumes the type is "simple", i.e. no // This assumes the type is "simple", i.e. no
// destructors, and the contents are SIMD // destructors, and the contents are SIMD
// etc. // etc.
assert!(!bcx.fcx.type_needs_drop(arg_type)); assert!(!bcx.fcx().type_needs_drop(arg_type));
let arg = adt::MaybeSizedValue::sized(llarg); let arg = adt::MaybeSizedValue::sized(llarg);
(0..contents.len()) (0..contents.len())
.map(|i| { .map(|i| {
...@@ -718,11 +696,9 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -718,11 +696,9 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
store_ty(bcx, llval, llresult, ret_ty); store_ty(bcx, llval, llresult, ret_ty);
} }
} }
Result::new(bcx, llresult)
} }
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn copy_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
allow_overlap: bool, allow_overlap: bool,
volatile: bool, volatile: bool,
tp_ty: Ty<'tcx>, tp_ty: Ty<'tcx>,
...@@ -759,7 +735,7 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -759,7 +735,7 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
call_debug_location) call_debug_location)
} }
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn memset_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
volatile: bool, volatile: bool,
tp_ty: Ty<'tcx>, tp_ty: Ty<'tcx>,
dst: ValueRef, dst: ValueRef,
...@@ -788,7 +764,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -788,7 +764,7 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
call_debug_location) call_debug_location)
} }
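copy_intrinsic selects memmove or memcpy depending on allow_overlap; the stable counterparts are ptr::copy and ptr::copy_nonoverlapping:

    use std::ptr;

    fn main() {
        let src = [1u8, 2, 3, 4];
        let mut dst = [0u8; 4];
        unsafe {
            // allow_overlap == true lowers to memmove...
            ptr::copy(src.as_ptr(), dst.as_mut_ptr(), src.len());
            // ...while the non-overlapping variant lowers to memcpy.
            ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
        }
        assert_eq!(dst, src);
    }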
fn count_zeros_intrinsic(bcx: Block, fn count_zeros_intrinsic(bcx: &BlockAndBuilder,
name: &str, name: &str,
val: ValueRef, val: ValueRef,
call_debug_location: DebugLoc) call_debug_location: DebugLoc)
...@@ -798,7 +774,7 @@ fn count_zeros_intrinsic(bcx: Block, ...@@ -798,7 +774,7 @@ fn count_zeros_intrinsic(bcx: Block,
Call(bcx, llfn, &[val, y], call_debug_location) Call(bcx, llfn, &[val, y], call_debug_location)
} }
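count_zeros_intrinsic wraps llvm.ctlz/llvm.cttz; the extra boolean argument y tells LLVM whether a zero input is undefined. At the source level these surface as the integer bit-counting methods, which pass false so that zero stays well-defined:

    fn main() {
        let x: u8 = 0b0010_0000;
        assert_eq!(x.leading_zeros(), 2);   // ctlz
        assert_eq!(x.trailing_zeros(), 5);  // cttz
        assert_eq!(0u8.leading_zeros(), 8); // zero input is well-defined
    }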
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn with_overflow_intrinsic<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
name: &str, name: &str,
a: ValueRef, a: ValueRef,
b: ValueRef, b: ValueRef,
...@@ -817,20 +793,21 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -817,20 +793,21 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
C_nil(bcx.ccx()) C_nil(bcx.ccx())
} }
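with_overflow_intrinsic calls the llvm.*.with.overflow family, which yields an aggregate of the wrapped result and an overflow flag; overflowing_add and friends expose the same pair:

    fn main() {
        let (wrapped, overflowed) = i32::MAX.overflowing_add(1);
        assert_eq!(wrapped, i32::MIN); // the truncated result
        assert!(overflowed);           // the carry-out flag
    }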
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn try_intrinsic<'blk, 'tcx>(
func: ValueRef, bcx: &BlockAndBuilder<'blk, 'tcx>,
data: ValueRef, func: ValueRef,
local_ptr: ValueRef, data: ValueRef,
dest: ValueRef, local_ptr: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> { dest: ValueRef,
dloc: DebugLoc
) {
if bcx.sess().no_landing_pads() { if bcx.sess().no_landing_pads() {
Call(bcx, func, &[data], dloc); Call(bcx, func, &[data], dloc);
Store(bcx, C_null(Type::i8p(bcx.ccx())), dest); Store(bcx, C_null(Type::i8p(&bcx.ccx())), dest);
bcx
} else if wants_msvc_seh(bcx.sess()) { } else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, func, data, local_ptr, dest, dloc) trans_msvc_try(bcx, func, data, local_ptr, dest, dloc);
} else { } else {
trans_gnu_try(bcx, func, data, local_ptr, dest, dloc) trans_gnu_try(bcx, func, data, local_ptr, dest, dloc);
} }
} }
...@@ -841,26 +818,26 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -841,26 +818,26 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// instructions are meant to work for all targets; as of the time of this // instructions are meant to work for all targets; as of the time of this
// writing, however, LLVM does not recommend the usage of these new instructions // writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized. // as the old ones are still more optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn trans_msvc_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
func: ValueRef, func: ValueRef,
data: ValueRef, data: ValueRef,
local_ptr: ValueRef, local_ptr: ValueRef,
dest: ValueRef, dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> { dloc: DebugLoc) {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
let dloc = DebugLoc::None; let dloc = DebugLoc::None;
SetPersonalityFn(bcx, bcx.fcx.eh_personality()); SetPersonalityFn(&bcx, bcx.fcx().eh_personality());
let normal = bcx.fcx.new_block("normal"); let normal = bcx.fcx().new_block("normal").build();
let catchswitch = bcx.fcx.new_block("catchswitch"); let catchswitch = bcx.fcx().new_block("catchswitch").build();
let catchpad = bcx.fcx.new_block("catchpad"); let catchpad = bcx.fcx().new_block("catchpad").build();
let caught = bcx.fcx.new_block("caught"); let caught = bcx.fcx().new_block("caught").build();
let func = llvm::get_param(bcx.fcx.llfn, 0); let func = llvm::get_param(bcx.fcx().llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1); let data = llvm::get_param(bcx.fcx().llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
// We're generating an IR snippet that looks like: // We're generating an IR snippet that looks like:
// //
...@@ -902,37 +879,36 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -902,37 +879,36 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// //
// More information can be found in libstd's seh.rs implementation. // More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to(); let i64p = Type::i64(ccx).ptr_to();
let slot = Alloca(bcx, i64p, "slot"); let slot = Alloca(&bcx, i64p, "slot");
Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc); Invoke(&bcx, func, &[data], normal.llbb(), catchswitch.llbb(), dloc);
Ret(normal, C_i32(ccx, 0), dloc); Ret(&normal, C_i32(ccx, 0), dloc);
let cs = CatchSwitch(catchswitch, None, None, 1); let cs = CatchSwitch(&catchswitch, None, None, 1);
AddHandler(catchswitch, cs, catchpad.llbb); AddHandler(&catchswitch, cs, catchpad.llbb());
let tcx = ccx.tcx(); let tcx = ccx.tcx();
let tydesc = match tcx.lang_items.msvc_try_filter() { let tydesc = match tcx.lang_items.msvc_try_filter() {
Some(did) => ::consts::get_static(ccx, did), Some(did) => ::consts::get_static(ccx, did),
None => bug!("msvc_try_filter not defined"), None => bug!("msvc_try_filter not defined"),
}; };
let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]); let tok = CatchPad(&catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]);
let addr = Load(catchpad, slot); let addr = Load(&catchpad, slot);
let arg1 = Load(catchpad, addr); let arg1 = Load(&catchpad, addr);
let val1 = C_i32(ccx, 1); let val1 = C_i32(ccx, 1);
let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1])); let arg2 = Load(&catchpad, InBoundsGEP(&catchpad, addr, &[val1]));
let local_ptr = BitCast(catchpad, local_ptr, i64p); let local_ptr = BitCast(&catchpad, local_ptr, i64p);
Store(catchpad, arg1, local_ptr); Store(&catchpad, arg1, local_ptr);
Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1])); Store(&catchpad, arg2, InBoundsGEP(&catchpad, local_ptr, &[val1]));
CatchRet(catchpad, tok, caught.llbb); CatchRet(&catchpad, tok, caught.llbb());
Ret(caught, C_i32(ccx, 1), dloc); Ret(&caught, C_i32(ccx, 1), dloc);
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest); Store(bcx, ret, dest);
return bcx
} }
// Definition of the standard "try" function for Rust using the GNU-like model // Definition of the standard "try" function for Rust using the GNU-like model
...@@ -946,13 +922,13 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -946,13 +922,13 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// function calling it, and that function may already have other personality // function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have // functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function. // the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn trans_gnu_try<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
func: ValueRef, func: ValueRef,
data: ValueRef, data: ValueRef,
local_ptr: ValueRef, local_ptr: ValueRef,
dest: ValueRef, dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> { dloc: DebugLoc) {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
let dloc = DebugLoc::None; let dloc = DebugLoc::None;
...@@ -973,14 +949,14 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -973,14 +949,14 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// expected to be `*mut *mut u8` for this to actually work, but that's // expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library. // managed by the standard library.
let then = bcx.fcx.new_block("then"); let then = bcx.fcx().new_block("then").build();
let catch = bcx.fcx.new_block("catch"); let catch = bcx.fcx().new_block("catch").build();
let func = llvm::get_param(bcx.fcx.llfn, 0); let func = llvm::get_param(bcx.fcx().llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1); let data = llvm::get_param(bcx.fcx().llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); let local_ptr = llvm::get_param(bcx.fcx().llfn, 2);
Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc); Invoke(&bcx, func, &[data], then.llbb(), catch.llbb(), dloc);
Ret(then, C_i32(ccx, 0), dloc); Ret(&then, C_i32(ccx, 0), dloc);
// Type indicator for the exception being thrown. // Type indicator for the exception being thrown.
// //
...@@ -990,18 +966,17 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ...@@ -990,18 +966,17 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// rust_try ignores the selector. // rust_try ignores the selector.
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
false); false);
let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1); let vals = LandingPad(&catch, lpad_ty, bcx.fcx().eh_personality(), 1);
AddClause(catch, vals, C_null(Type::i8p(ccx))); AddClause(&catch, vals, C_null(Type::i8p(ccx)));
let ptr = ExtractValue(catch, vals, 0); let ptr = ExtractValue(&catch, vals, 0);
Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to())); Store(&catch, ptr, BitCast(&catch, local_ptr, Type::i8p(ccx).ptr_to()));
Ret(catch, C_i32(ccx, 1), dloc); Ret(&catch, C_i32(ccx, 1), dloc);
}); });
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest); Store(bcx, ret, dest);
return bcx;
} }
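Both trans_msvc_try and trans_gnu_try generate a rust_try shim with the same contract: run func(data), return 0 on success, and return 1 after stashing the panic payload if the call unwinds. A rough model of that contract in safe Rust; catch_unwind is the library-level analogue, not what the shim actually emits:

    use std::panic::{catch_unwind, AssertUnwindSafe};

    fn rust_try(func: fn(&mut u32), data: &mut u32) -> i32 {
        match catch_unwind(AssertUnwindSafe(|| func(data))) {
            Ok(()) => 0,
            Err(_payload) => 1, // the real shim also writes the payload out
        }
    }

    fn main() {
        let mut x = 0;
        assert_eq!(rust_try(|v| *v += 1, &mut x), 0);
        assert_eq!(rust_try(|_| panic!("boom"), &mut x), 1); // panic is caught
        assert_eq!(x, 1);
    }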
// Helper function to give a Block to a closure to translate a shim function. // Helper function to give a Block to a closure to translate a shim function.
...@@ -1010,7 +985,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, ...@@ -1010,7 +985,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
name: &str, name: &str,
inputs: Vec<Ty<'tcx>>, inputs: Vec<Ty<'tcx>>,
output: Ty<'tcx>, output: Ty<'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>))
-> ValueRef { -> ValueRef {
let ccx = fcx.ccx; let ccx = fcx.ccx;
let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false);
...@@ -1035,7 +1010,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, ...@@ -1035,7 +1010,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
// //
// This function is only generated once and is then cached. // This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>))
-> ValueRef { -> ValueRef {
let ccx = fcx.ccx; let ccx = fcx.ccx;
if let Some(llfn) = ccx.rust_try_fn().get() { if let Some(llfn) = ccx.rust_try_fn().get() {
...@@ -1060,16 +1035,16 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { ...@@ -1060,16 +1035,16 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
span_err!(a, b, E0511, "{}", c); span_err!(a, b, E0511, "{}", c);
} }
fn generic_simd_intrinsic<'blk, 'tcx, 'a> fn generic_simd_intrinsic<'blk, 'tcx, 'a>(
(bcx: Block<'blk, 'tcx>, bcx: &BlockAndBuilder<'blk, 'tcx>,
name: &str, name: &str,
callee_ty: Ty<'tcx>, callee_ty: Ty<'tcx>,
llargs: &[ValueRef], llargs: &[ValueRef],
ret_ty: Ty<'tcx>, ret_ty: Ty<'tcx>,
llret_ty: Type, llret_ty: Type,
call_debug_location: DebugLoc, call_debug_location: DebugLoc,
span: Span) -> ValueRef span: Span
{ ) -> ValueRef {
// macros for error handling: // macros for error handling:
macro_rules! emit_error { macro_rules! emit_error {
($msg: tt) => { ($msg: tt) => {
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
const VTABLE_OFFSET: usize = 3; const VTABLE_OFFSET: usize = 3;
/// Extracts a method from a trait object's vtable, at the specified index. /// Extracts a method from a trait object's vtable, at the specified index.
pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn get_virtual_method<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
llvtable: ValueRef, llvtable: ValueRef,
vtable_index: usize) vtable_index: usize)
-> ValueRef { -> ValueRef {
...@@ -94,9 +94,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, ...@@ -94,9 +94,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
let dest = fcx.llretslotptr.get(); let dest = fcx.llretslotptr.get();
let llargs = get_params(fcx.llfn); let llargs = get_params(fcx.llfn);
bcx = callee.call(bcx, DebugLoc::None, bcx = callee.call(bcx, DebugLoc::None,
&llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx; &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).0;
fcx.finish(bcx, DebugLoc::None); fcx.finish(&bcx, DebugLoc::None);
llfn llfn
} }
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
use super::operand::OperandValue::{Pair, Ref, Immediate}; use super::operand::OperandValue::{Pair, Ref, Immediate};
use std::cell::Ref as CellRef; use std::cell::Ref as CellRef;
use std::ptr;
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock) { pub fn trans_block(&mut self, bb: mir::BasicBlock) {
...@@ -121,10 +122,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -121,10 +122,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
let ps = self.get_personality_slot(&bcx); let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps); let lp = bcx.load(ps);
bcx.with_block(|bcx| { base::call_lifetime_end(&bcx, ps);
base::call_lifetime_end(bcx, ps); base::trans_unwind_resume(&bcx, lp);
base::trans_unwind_resume(bcx, lp);
});
} }
} }
...@@ -143,9 +142,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -143,9 +142,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr); let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx()); let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let discr = bcx.with_block(|bcx| let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);
adt::trans_get_discr(bcx, ty, discr_lvalue.llval, None, true)
);
let mut bb_hist = FxHashMap(); let mut bb_hist = FxHashMap();
for target in targets { for target in targets {
...@@ -169,8 +166,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -169,8 +166,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { for (adt_variant, &target) in adt_def.variants.iter().zip(targets) {
if default_bb != Some(target) { if default_bb != Some(target) {
let llbb = llblock(self, target); let llbb = llblock(self, target);
let llval = bcx.with_block(|bcx| adt::trans_case( let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val));
bcx, ty, Disr::from(adt_variant.disr_val)));
build::AddCase(switch, llval, llbb) build::AddCase(switch, llval, llbb)
} }
} }
...@@ -179,7 +175,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -179,7 +175,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
let (otherwise, targets) = targets.split_last().unwrap(); let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty)); let discr = base::to_immediate(&bcx, discr, switch_ty);
let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
for (value, target) in values.iter().zip(targets) { for (value, target) in values.iter().zip(targets) {
let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty); let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
...@@ -259,13 +255,11 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -259,13 +255,11 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
// but I am shooting for a quick fix to #35546 // but I am shooting for a quick fix to #35546
// here that can be cleanly backported to beta, so // here that can be cleanly backported to beta, so
// I want to avoid touching all of trans. // I want to avoid touching all of trans.
bcx.with_block(|bcx| { let scratch = base::alloc_ty(&bcx, ty, "drop");
let scratch = base::alloc_ty(bcx, ty, "drop"); base::call_lifetime_start(&bcx, scratch);
base::call_lifetime_start(bcx, scratch); build::Store(&bcx, lvalue.llval, base::get_dataptr(&bcx, scratch));
build::Store(bcx, lvalue.llval, base::get_dataptr(bcx, scratch)); build::Store(&bcx, lvalue.llextra, base::get_meta(&bcx, scratch));
build::Store(bcx, lvalue.llextra, base::get_meta(bcx, scratch)); scratch
scratch
})
}; };
if let Some(unwind) = unwind { if let Some(unwind) = unwind {
bcx.invoke(drop_fn, bcx.invoke(drop_fn,
...@@ -443,6 +437,65 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -443,6 +437,65 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
return; return;
} }
// FIXME: This should proxy to the drop glue in the future when the ABI matches;
// most of the code below was copied from the match arm for TerminatorKind::Drop.
if intrinsic == Some("drop_in_place") {
let &(_, target) = destination.as_ref().unwrap();
let ty = if let ty::TyFnDef(_, substs, _) = callee.ty.sty {
substs.type_at(0)
} else {
bug!("Unexpected ty: {}", callee.ty);
};
// Double-check that the type actually needs dropping
if !glue::type_needs_drop(bcx.tcx(), ty) {
funclet_br(self, bcx, target);
return;
}
let ptr = self.trans_operand(&bcx, &args[0]);
let (llval, llextra) = match ptr.val {
Immediate(llptr) => (llptr, ptr::null_mut()),
Pair(llptr, llextra) => (llptr, llextra),
Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty)
};
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty);
let is_sized = common::type_is_sized(bcx.tcx(), ty);
let llvalue = if is_sized {
if drop_ty != ty {
bcx.pointercast(llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
} else {
llval
}
} else {
// FIXME(#36457) Currently drop glue takes unsized
// values as a `*(data, meta)`, but elsewhere in
// MIR we pass `(data, meta)` as two separate
// arguments. It would be better to fix drop glue,
// but I am shooting for a quick fix to #35546
// here that can be cleanly backported to beta, so
// I want to avoid touching all of trans.
let scratch = base::alloc_ty(&bcx, ty, "drop");
base::call_lifetime_start(&bcx, scratch);
build::Store(&bcx, llval, base::get_dataptr(&bcx, scratch));
build::Store(&bcx, llextra, base::get_meta(&bcx, scratch));
scratch
};
if let Some(unwind) = *cleanup {
bcx.invoke(drop_fn,
&[llvalue],
self.blocks[target].llbb,
llblock(self, unwind),
cleanup_bundle);
} else {
bcx.call(drop_fn, &[llvalue], cleanup_bundle);
funclet_br(self, bcx, target);
}
return;
}
if intrinsic == Some("transmute") { if intrinsic == Some("transmute") {
let &(ref dest, target) = destination.as_ref().unwrap(); let &(ref dest, target) = destination.as_ref().unwrap();
self.with_lvalue_ref(&bcx, dest, |this, dest| { self.with_lvalue_ref(&bcx, dest, |this, dest| {
...@@ -537,10 +590,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -537,10 +590,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
bug!("Cannot use direct operand with an intrinsic call") bug!("Cannot use direct operand with an intrinsic call")
}; };
bcx.with_block(|bcx| { trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, debug_loc);
trans_intrinsic_call(bcx, callee.ty, &fn_ty,
&llargs, dest, debug_loc);
});
if let ReturnDest::IndirectOperand(dst, _) = ret_dest { if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
// Make a fake operand for store_return // Make a fake operand for store_return
...@@ -554,8 +604,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { ...@@ -554,8 +604,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
if let Some((_, target)) = *destination { if let Some((_, target)) = *destination {
funclet_br(self, bcx, target); funclet_br(self, bcx, target);
} else { } else {
// trans_intrinsic_call already used Unreachable. bcx.unreachable();
// bcx.unreachable();
} }
return; return;
...@@ -620,9 +669,7 @@ fn trans_argument(&mut self, ...@@ -620,9 +669,7 @@ fn trans_argument(&mut self,
let (ptr, meta) = (a, b); let (ptr, meta) = (a, b);
if *next_idx == 0 { if *next_idx == 0 {
if let Virtual(idx) = *callee { if let Virtual(idx) = *callee {
let llfn = bcx.with_block(|bcx| { let llfn = meth::get_virtual_method(bcx, meta, idx);
meth::get_virtual_method(bcx, meta, idx)
});
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(bcx.pointercast(llfn, llty)); *callee = Fn(bcx.pointercast(llfn, llty));
} }
...@@ -768,12 +815,10 @@ fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRe ...@@ -768,12 +815,10 @@ fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRe
slot slot
} else { } else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
bcx.with_block(|bcx| { let slot = base::alloca(bcx, llretty, "personalityslot");
let slot = base::alloca(bcx, llretty, "personalityslot"); self.llpersonalityslot = Some(slot);
self.llpersonalityslot = Some(slot); base::call_lifetime_start(bcx, slot);
base::call_lifetime_start(bcx, slot); slot
slot
})
} }
} }
...@@ -863,18 +908,14 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -863,18 +908,14 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
return if fn_ret_ty.is_indirect() { return if fn_ret_ty.is_indirect() {
// Odd, but possible, case: we have an operand temporary, // Odd, but possible, case: we have an operand temporary,
// but the calling convention has an indirect return. // but the calling convention has an indirect return.
let tmp = bcx.with_block(|bcx| { let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret");
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
llargs.push(tmp); llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, index) ReturnDest::IndirectOperand(tmp, index)
} else if is_intrinsic { } else if is_intrinsic {
// Currently, intrinsics always need a location to store // Currently, intrinsics always need a location to store
// the result, so we create a temporary alloca for the // the result, so we create a temporary alloca for the
// result // result
let tmp = bcx.with_block(|bcx| { let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret");
base::alloc_ty(bcx, ret_ty, "tmp_ret")
});
ReturnDest::IndirectOperand(tmp, index) ReturnDest::IndirectOperand(tmp, index)
} else { } else {
ReturnDest::DirectOperand(index) ReturnDest::DirectOperand(index)
...@@ -939,9 +980,7 @@ fn store_return(&mut self, ...@@ -939,9 +980,7 @@ fn store_return(&mut self,
DirectOperand(index) => { DirectOperand(index) => {
// If there is a cast, we have to store and reload. // If there is a cast, we have to store and reload.
let op = if ret_ty.cast.is_some() { let op = if ret_ty.cast.is_some() {
let tmp = bcx.with_block(|bcx| { let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret");
base::alloc_ty(bcx, op.ty, "tmp_ret")
});
ret_ty.store(bcx, op.immediate(), tmp); ret_ty.store(bcx, op.immediate(), tmp);
self.trans_load(bcx, tmp, op.ty) self.trans_load(bcx, tmp, op.ty)
} else { } else {
......
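The cast path in store_return spills the value through a temporary and reloads it at the cast-to type rather than casting the SSA value directly. The memory round-trip idea, in miniature:

    fn store_and_reload(v: f32) -> u32 {
        let tmp = v.to_ne_bytes();  // store through a stack temporary...
        u32::from_ne_bytes(tmp)     // ...then reload at the target type
    }

    fn main() {
        assert_eq!(store_and_reload(1.0), 1.0f32.to_bits());
    }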
...@@ -50,7 +50,7 @@ pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -50,7 +50,7 @@ pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
-> LvalueRef<'tcx> -> LvalueRef<'tcx>
{ {
assert!(!ty.has_erasable_regions()); assert!(!ty.has_erasable_regions());
let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name)); let lltemp = base::alloc_ty(bcx, ty, name);
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
} }
......
...@@ -181,7 +181,7 @@ fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>, ...@@ -181,7 +181,7 @@ fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>,
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let bcx = fcx.init(true).build(); let bcx = fcx.init(true);
let mir = bcx.mir(); let mir = bcx.mir();
// Analyze the temps to determine which must be lvalues // Analyze the temps to determine which must be lvalues
...@@ -240,11 +240,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { ...@@ -240,11 +240,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
if dbg { if dbg {
let dbg_loc = mircx.debug_loc(source_info); let dbg_loc = mircx.debug_loc(source_info);
if let DebugLoc::ScopeAt(scope, span) = dbg_loc { if let DebugLoc::ScopeAt(scope, span) = dbg_loc {
bcx.with_block(|bcx| { declare_local(&bcx, name, ty, scope,
declare_local(bcx, name, ty, scope, VariableAccess::DirectVariable { alloca: lvalue.llval },
VariableAccess::DirectVariable { alloca: lvalue.llval }, VariableKind::LocalVariable, span);
VariableKind::LocalVariable, span);
});
} else { } else {
panic!("Unexpected"); panic!("Unexpected");
} }
...@@ -353,9 +351,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -353,9 +351,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
_ => bug!("spread argument isn't a tuple?!") _ => bug!("spread argument isn't a tuple?!")
}; };
let lltemp = bcx.with_block(|bcx| { let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index));
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
});
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let dst = bcx.struct_gep(lltemp, i); let dst = bcx.struct_gep(lltemp, i);
let arg = &fcx.fn_ty.args[idx]; let arg = &fcx.fn_ty.args[idx];
...@@ -376,7 +372,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -376,7 +372,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
// Now that we have one alloca that contains the aggregate value, // Now that we have one alloca that contains the aggregate value,
// we can create one debuginfo entry for the argument. // we can create one debuginfo entry for the argument.
bcx.with_block(|bcx| arg_scope.map(|scope| { arg_scope.map(|scope| {
let variable_access = VariableAccess::DirectVariable { let variable_access = VariableAccess::DirectVariable {
alloca: lltemp alloca: lltemp
}; };
...@@ -384,7 +380,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -384,7 +380,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
arg_ty, scope, variable_access, arg_ty, scope, variable_access,
VariableKind::ArgumentVariable(arg_index + 1), VariableKind::ArgumentVariable(arg_index + 1),
bcx.fcx().span.unwrap_or(DUMMY_SP)); bcx.fcx().span.unwrap_or(DUMMY_SP));
})); });
return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty))); return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty)));
} }
...@@ -433,9 +429,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -433,9 +429,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
}; };
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else { } else {
let lltemp = bcx.with_block(|bcx| { let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index));
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
});
if common::type_is_fat_ptr(tcx, arg_ty) { if common::type_is_fat_ptr(tcx, arg_ty) {
// we pass fat pointers as two words, but we want to // we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words, // represent them internally as a pointer to two words,
...@@ -453,7 +447,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -453,7 +447,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
} }
lltemp lltemp
}; };
bcx.with_block(|bcx| arg_scope.map(|scope| { arg_scope.map(|scope| {
// Is this a regular argument? // Is this a regular argument?
if arg_index > 0 || mir.upvar_decls.is_empty() { if arg_index > 0 || mir.upvar_decls.is_empty() {
declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty,
...@@ -531,7 +525,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, ...@@ -531,7 +525,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
VariableKind::CapturedVariable, VariableKind::CapturedVariable,
bcx.fcx().span.unwrap_or(DUMMY_SP)); bcx.fcx().span.unwrap_or(DUMMY_SP));
} }
})); });
LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))) LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)))
}).collect() }).collect()
} }
......
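The trans_mir and arg_local_refs hunks extend the same cleanup to debuginfo: fcx.init(true) now yields the builder without an explicit .build(), and declare_local is called on the borrowed builder instead of inside a with_block closure. A hedged model of the resulting flow; the names are stand-ins, and the real declare_local signature also carries type, variable-kind, and span arguments:

// Simplified model of the post-refactor argument handling.
struct BlockAndBuilder;
struct Scope;
type ValueRef = u32;

fn alloc_ty(_bcx: &BlockAndBuilder, name: &str) -> ValueRef {
    println!("alloca for {}", name);
    0
}

// Hypothetical, trimmed-down declare_local.
fn declare_local(_bcx: &BlockAndBuilder, name: &str, _scope: &Scope, alloca: ValueRef) {
    println!("debuginfo: {} -> alloca %{}", name, alloca);
}

fn arg_local_ref(bcx: &BlockAndBuilder, arg_scope: Option<Scope>, arg_index: usize) -> ValueRef {
    let lltemp = alloc_ty(bcx, &format!("arg{}", arg_index));
    // Previously: bcx.with_block(|bcx| arg_scope.map(|scope| ...));
    // now the debuginfo registration runs on the builder directly.
    arg_scope.map(|scope| declare_local(bcx, "arg", &scope, lltemp));
    lltemp
}

fn main() {
    let bcx = BlockAndBuilder;
    arg_local_ref(&bcx, Some(Scope), 0);
}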
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::indexed_vec::Idx;
use base; use base;
use common::{self, Block, BlockAndBuilder}; use common::{self, BlockAndBuilder};
use value::Value; use value::Value;
use type_of; use type_of;
use type_::Type; use type_::Type;
...@@ -247,11 +247,11 @@ pub fn store_operand(&mut self, ...@@ -247,11 +247,11 @@ pub fn store_operand(&mut self,
operand: OperandRef<'tcx>) operand: OperandRef<'tcx>)
{ {
debug!("store_operand: operand={:?} lldest={:?}", operand, lldest); debug!("store_operand: operand={:?} lldest={:?}", operand, lldest);
bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand)) self.store_operand_direct(bcx, lldest, operand)
} }
pub fn store_operand_direct(&mut self, pub fn store_operand_direct(&mut self,
bcx: Block<'bcx, 'tcx>, bcx: &BlockAndBuilder<'bcx, 'tcx>,
lldest: ValueRef, lldest: ValueRef,
operand: OperandRef<'tcx>) operand: OperandRef<'tcx>)
{ {
......
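With store_operand_direct converted from Block to &BlockAndBuilder, the wrapping store_operand becomes a one-line delegation. A small sketch under assumed simplified types, with OperandRef reduced to a bare value:

// Minimal sketch: both methods now share the same bcx type,
// so the outer one is a plain forwarding call.
struct BlockAndBuilder;
type ValueRef = u64;

#[derive(Debug, Clone, Copy)]
struct OperandRef {
    val: ValueRef,
}

struct MirContext;

impl MirContext {
    fn store_operand(&mut self, bcx: &BlockAndBuilder, lldest: ValueRef, operand: OperandRef) {
        println!("store_operand: operand={:?} lldest={:?}", operand, lldest);
        // Before: bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand))
        self.store_operand_direct(bcx, lldest, operand)
    }

    fn store_operand_direct(&mut self, _bcx: &BlockAndBuilder, _lldest: ValueRef, _operand: OperandRef) {
        // The actual store/memcpy emission is elided in this sketch.
    }
}

fn main() {
    let mut cx = MirContext;
    cx.store_operand(&BlockAndBuilder, 1, OperandRef { val: 2 });
}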
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
use asm; use asm;
use base; use base;
use callee::Callee; use callee::Callee;
use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result}; use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder};
use common::{C_integral}; use common::{C_integral};
use debuginfo::DebugLoc; use debuginfo::DebugLoc;
use adt; use adt;
...@@ -70,30 +70,28 @@ pub fn trans_rvalue(&mut self, ...@@ -70,30 +70,28 @@ pub fn trans_rvalue(&mut self,
// so the (generic) MIR may not be able to expand it. // so the (generic) MIR may not be able to expand it.
let operand = self.trans_operand(&bcx, source); let operand = self.trans_operand(&bcx, source);
let operand = operand.pack_if_pair(&bcx); let operand = operand.pack_if_pair(&bcx);
bcx.with_block(|bcx| { match operand.val {
match operand.val { OperandValue::Pair(..) => bug!(),
OperandValue::Pair(..) => bug!(), OperandValue::Immediate(llval) => {
OperandValue::Immediate(llval) => { // unsize from an immediate structure. We don't
// unsize from an immediate structure. We don't // really need a temporary alloca here, but
// really need a temporary alloca here, but // avoiding it would require us to have
// avoiding it would require us to have // `coerce_unsized_into` use extractvalue to
// `coerce_unsized_into` use extractvalue to // index into the struct, and this case isn't
// index into the struct, and this case isn't // important enough for it.
// important enough for it. debug!("trans_rvalue: creating ugly alloca");
debug!("trans_rvalue: creating ugly alloca"); let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp");
let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp"); base::store_ty(&bcx, llval, lltemp, operand.ty);
base::store_ty(bcx, llval, lltemp, operand.ty); base::coerce_unsized_into(&bcx,
base::coerce_unsized_into(bcx, lltemp, operand.ty,
lltemp, operand.ty, dest.llval, cast_ty);
dest.llval, cast_ty);
}
OperandValue::Ref(llref) => {
base::coerce_unsized_into(bcx,
llref, operand.ty,
dest.llval, cast_ty);
}
} }
}); OperandValue::Ref(llref) => {
base::coerce_unsized_into(&bcx,
llref, operand.ty,
dest.llval, cast_ty);
}
}
bcx bcx
} }
...@@ -102,11 +100,9 @@ pub fn trans_rvalue(&mut self, ...@@ -102,11 +100,9 @@ pub fn trans_rvalue(&mut self,
let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); let size = count.value.as_u64(bcx.tcx().sess.target.uint_type);
let size = C_uint(bcx.ccx(), size); let size = C_uint(bcx.ccx(), size);
let base = base::get_dataptr_builder(&bcx, dest.llval); let base = base::get_dataptr_builder(&bcx, dest.llval);
let bcx = bcx.map_block(|block| { let bcx = tvec::slice_for_each(bcx, base, tr_elem.ty, size, |bcx, llslot| {
tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| { self.store_operand_direct(&bcx, llslot, tr_elem);
self.store_operand_direct(block, llslot, tr_elem); bcx
block
})
}); });
bcx bcx
} }
...@@ -115,10 +111,8 @@ pub fn trans_rvalue(&mut self, ...@@ -115,10 +111,8 @@ pub fn trans_rvalue(&mut self,
match *kind { match *kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
let disr = Disr::from(adt_def.variants[variant_index].disr_val); let disr = Disr::from(adt_def.variants[variant_index].disr_val);
bcx.with_block(|bcx| { adt::trans_set_discr(&bcx,
adt::trans_set_discr(bcx, dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr));
dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr));
});
for (i, operand) in operands.iter().enumerate() { for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand); let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields. // Do not generate stores and GEPis for zero-sized fields.
...@@ -171,10 +165,7 @@ pub fn trans_rvalue(&mut self, ...@@ -171,10 +165,7 @@ pub fn trans_rvalue(&mut self,
self.trans_operand(&bcx, input).immediate() self.trans_operand(&bcx, input).immediate()
}).collect(); }).collect();
bcx.with_block(|bcx| { asm::trans_inline_asm(&bcx, asm, outputs, input_vals);
asm::trans_inline_asm(bcx, asm, outputs, input_vals);
});
bcx bcx
} }
...@@ -238,10 +229,8 @@ pub fn trans_rvalue_operand(&mut self, ...@@ -238,10 +229,8 @@ pub fn trans_rvalue_operand(&mut self,
} }
OperandValue::Immediate(lldata) => { OperandValue::Immediate(lldata) => {
// "standard" unsize // "standard" unsize
let (lldata, llextra) = bcx.with_block(|bcx| { let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
base::unsize_thin_ptr(bcx, lldata, operand.ty, cast_ty);
operand.ty, cast_ty)
});
OperandValue::Pair(lldata, llextra) OperandValue::Pair(lldata, llextra)
} }
OperandValue::Ref(_) => { OperandValue::Ref(_) => {
...@@ -281,9 +270,7 @@ pub fn trans_rvalue_operand(&mut self, ...@@ -281,9 +270,7 @@ pub fn trans_rvalue_operand(&mut self,
let discr = match operand.val { let discr = match operand.val {
OperandValue::Immediate(llval) => llval, OperandValue::Immediate(llval) => llval,
OperandValue::Ref(llptr) => { OperandValue::Ref(llptr) => {
bcx.with_block(|bcx| { adt::trans_get_discr(&bcx, operand.ty, llptr, None, true)
adt::trans_get_discr(bcx, operand.ty, llptr, None, true)
})
} }
OperandValue::Pair(..) => bug!("Unexpected Pair operand") OperandValue::Pair(..) => bug!("Unexpected Pair operand")
}; };
...@@ -468,19 +455,16 @@ pub fn trans_rvalue_operand(&mut self, ...@@ -468,19 +455,16 @@ pub fn trans_rvalue_operand(&mut self,
let llalign = C_uint(bcx.ccx(), align); let llalign = C_uint(bcx.ccx(), align);
let llty_ptr = llty.ptr_to(); let llty_ptr = llty.ptr_to();
let box_ty = bcx.tcx().mk_box(content_ty); let box_ty = bcx.tcx().mk_box(content_ty);
let mut llval = None; let val = base::malloc_raw_dyn(
let bcx = bcx.map_block(|bcx| { &bcx,
let Result { bcx, val } = base::malloc_raw_dyn(bcx, llty_ptr,
llty_ptr, box_ty,
box_ty, llsize,
llsize, llalign,
llalign, debug_loc
debug_loc); );
llval = Some(val);
bcx
});
let operand = OperandRef { let operand = OperandRef {
val: OperandValue::Immediate(llval.unwrap()), val: OperandValue::Immediate(val),
ty: box_ty, ty: box_ty,
}; };
(bcx, operand) (bcx, operand)
...@@ -543,21 +527,21 @@ pub fn trans_scalar_binop(&mut self, ...@@ -543,21 +527,21 @@ pub fn trans_scalar_binop(&mut self,
mir::BinOp::BitAnd => bcx.and(lhs, rhs), mir::BinOp::BitAnd => bcx.and(lhs, rhs),
mir::BinOp::BitXor => bcx.xor(lhs, rhs), mir::BinOp::BitXor => bcx.xor(lhs, rhs),
mir::BinOp::Shl => { mir::BinOp::Shl => {
bcx.with_block(|bcx| { common::build_unchecked_lshift(
common::build_unchecked_lshift(bcx, &bcx,
lhs, lhs,
rhs, rhs,
DebugLoc::None) DebugLoc::None
}) )
} }
mir::BinOp::Shr => { mir::BinOp::Shr => {
bcx.with_block(|bcx| { common::build_unchecked_rshift(
common::build_unchecked_rshift(bcx, bcx,
input_ty, input_ty,
lhs, lhs,
rhs, rhs,
DebugLoc::None) DebugLoc::None
}) )
} }
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil { mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil {
...@@ -677,9 +661,7 @@ pub fn trans_scalar_checked_binop(&mut self, ...@@ -677,9 +661,7 @@ pub fn trans_scalar_checked_binop(&mut self,
mir::BinOp::Shl | mir::BinOp::Shr => { mir::BinOp::Shl | mir::BinOp::Shr => {
let lhs_llty = val_ty(lhs); let lhs_llty = val_ty(lhs);
let rhs_llty = val_ty(rhs); let rhs_llty = val_ty(rhs);
let invert_mask = bcx.with_block(|bcx| { let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true);
common::shift_mask_val(bcx, lhs_llty, rhs_llty, true)
});
let outer_bits = bcx.and(rhs, invert_mask); let outer_bits = bcx.and(rhs, invert_mask);
let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
......
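The rvalue.rs hunks also show why the Result import could be dropped: a helper like base::malloc_raw_dyn no longer needs to hand back a Result { bcx, val } pair, so the caller loses the map_block plus Option smuggling and simply receives the value. Sketched with illustrative stand-in types:

// Sketch of the return-shape change around malloc_raw_dyn.
struct BlockAndBuilder;
type ValueRef = i64;

// Old shape: the helper threaded the block through its result.
struct ResultPair {
    bcx: BlockAndBuilder,
    val: ValueRef,
}

fn malloc_raw_dyn_old(bcx: BlockAndBuilder, size: ValueRef) -> ResultPair {
    ResultPair { bcx, val: size + 1 }
}

// New shape: the builder is borrowed, only the value comes back.
fn malloc_raw_dyn_new(_bcx: &BlockAndBuilder, size: ValueRef) -> ValueRef {
    size + 1
}

fn main() {
    // Before: unpack the pair and rebind bcx at every call.
    let ResultPair { bcx, val } = malloc_raw_dyn_old(BlockAndBuilder, 8);
    // After: a direct call, no re-binding of bcx required.
    let val2 = malloc_raw_dyn_new(&bcx, 8);
    assert_eq!(val, val2);
}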
...@@ -63,12 +63,10 @@ pub fn trans_statement(&mut self, ...@@ -63,12 +63,10 @@ pub fn trans_statement(&mut self,
mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => {
let ty = self.monomorphized_lvalue_ty(lvalue); let ty = self.monomorphized_lvalue_ty(lvalue);
let lvalue_transed = self.trans_lvalue(&bcx, lvalue); let lvalue_transed = self.trans_lvalue(&bcx, lvalue);
bcx.with_block(|bcx| adt::trans_set_discr(&bcx,
adt::trans_set_discr(bcx, ty,
ty, lvalue_transed.llval,
lvalue_transed.llval, Disr::from(variant_index));
Disr::from(variant_index))
);
bcx bcx
} }
mir::StatementKind::StorageLive(ref lvalue) => { mir::StatementKind::StorageLive(ref lvalue) => {
......
...@@ -18,16 +18,16 @@ ...@@ -18,16 +18,16 @@
use debuginfo::DebugLoc; use debuginfo::DebugLoc;
use rustc::ty::Ty; use rustc::ty::Ty;
pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, pub fn slice_for_each<'blk, 'tcx, F>(bcx: BlockAndBuilder<'blk, 'tcx>,
data_ptr: ValueRef, data_ptr: ValueRef,
unit_ty: Ty<'tcx>, unit_ty: Ty<'tcx>,
len: ValueRef, len: ValueRef,
f: F) f: F)
-> Block<'blk, 'tcx> where -> BlockAndBuilder<'blk, 'tcx>
F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, where F: FnOnce(BlockAndBuilder<'blk, 'tcx>, ValueRef) -> BlockAndBuilder<'blk, 'tcx>,
{ {
let _icx = push_ctxt("tvec::slice_for_each"); let _icx = push_ctxt("tvec::slice_for_each");
let fcx = bcx.fcx; let fcx = bcx.fcx();
// Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890)
let zst = type_is_zero_size(bcx.ccx(), unit_ty); let zst = type_is_zero_size(bcx.ccx(), unit_ty);
...@@ -37,27 +37,33 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, ...@@ -37,27 +37,33 @@ pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
InBoundsGEP(bcx, a, &[b]) InBoundsGEP(bcx, a, &[b])
}; };
let header_bcx = fcx.new_block("slice_loop_header"); let body_bcx = fcx.new_block("slice_loop_body").build();
let body_bcx = fcx.new_block("slice_loop_body"); let next_bcx = fcx.new_block("slice_loop_next").build();
let next_bcx = fcx.new_block("slice_loop_next"); let header_bcx = fcx.new_block("slice_loop_header").build();
let start = if zst { let start = if zst {
C_uint(bcx.ccx(), 0 as usize) C_uint(bcx.ccx(), 0 as usize)
} else { } else {
data_ptr data_ptr
}; };
let end = add(bcx, start, len); let end = add(&bcx, start, len);
Br(bcx, header_bcx.llbb, DebugLoc::None); Br(&bcx, header_bcx.llbb(), DebugLoc::None);
let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]); let current = Phi(&header_bcx, val_ty(start), &[start], &[bcx.llbb()]);
let keep_going = let keep_going =
ICmp(header_bcx, llvm::IntNE, current, end, DebugLoc::None); ICmp(&header_bcx, llvm::IntNE, current, end, DebugLoc::None);
CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); CondBr(&header_bcx, keep_going, body_bcx.llbb(), next_bcx.llbb(), DebugLoc::None);
let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); let body_bcx = f(body_bcx, if zst { data_ptr } else { current });
let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize)); // FIXME(simulacrum): The code below is identical to the closure (add) above, but using the
AddIncomingToPhi(current, next, body_bcx.llbb); // closure doesn't compile due to body_bcx still being borrowed when dropped.
Br(body_bcx, header_bcx.llbb, DebugLoc::None); let next = if zst {
Add(&body_bcx, current, C_uint(bcx.ccx(), 1usize), DebugLoc::None)
} else {
InBoundsGEP(&body_bcx, current, &[C_uint(bcx.ccx(), 1usize)])
};
AddIncomingToPhi(current, next, body_bcx.llbb());
Br(&body_bcx, header_bcx.llbb(), DebugLoc::None);
next_bcx next_bcx
} }
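The rewritten slice_for_each keeps the header/body/next block layout but routes every branch and phi through borrowed builders, which is also why the increment is spelled out instead of reusing the add closure (the FIXME above). A rough runnable model of the loop shape, with plain integers standing in for LLVM values and a unit element stride assumed:

// Structural model of slice_for_each: the header tests the cursor,
// the body runs the callback and advances, next is the exit block.
// Ordinary Rust control flow stands in for the LLVM blocks and phi.
fn slice_for_each<F>(data_ptr: usize, len: usize, zst: bool, mut f: F)
where
    F: FnMut(usize),
{
    // For zero-sized elements the cursor is a counter starting at 0;
    // otherwise it is a pointer starting at data_ptr (see #9890).
    let start = if zst { 0 } else { data_ptr };
    let end = start + len; // the add helper: Add for zst, InBoundsGEP otherwise
    let mut current = start; // the phi node over slice_loop_header
    while current != end {
        // slice_loop_body
        f(if zst { data_ptr } else { current });
        current += 1; // the duplicated increment the FIXME refers to
    }
    // slice_loop_next: fall through to the caller
}

fn main() {
    let mut seen = Vec::new();
    slice_for_each(100, 4, false, |p| seen.push(p));
    assert_eq!(seen, vec![100, 101, 102, 103]);
}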
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
use llvm; use llvm;
use llvm::{UseRef, ValueRef}; use llvm::{UseRef, ValueRef};
use basic_block::BasicBlock; use basic_block::BasicBlock;
use common::Block; use common::BlockAndBuilder;
use std::fmt; use std::fmt;
...@@ -65,11 +65,11 @@ pub fn erase_from_parent(self) { ...@@ -65,11 +65,11 @@ pub fn erase_from_parent(self) {
/// This only performs a search for a trivially dominating store. The store /// This only performs a search for a trivially dominating store. The store
/// must be the only user of this value, and there must not be any conditional /// must be the only user of this value, and there must not be any conditional
/// branches between the store and the given block. /// branches between the store and the given block.
pub fn get_dominating_store(self, bcx: Block) -> Option<Value> { pub fn get_dominating_store(self, bcx: &BlockAndBuilder) -> Option<Value> {
match self.get_single_user().and_then(|user| user.as_store_inst()) { match self.get_single_user().and_then(|user| user.as_store_inst()) {
Some(store) => { Some(store) => {
store.get_parent().and_then(|store_bb| { store.get_parent().and_then(|store_bb| {
let mut bb = BasicBlock(bcx.llbb); let mut bb = BasicBlock(bcx.llbb());
let mut ret = Some(store); let mut ret = Some(store);
while bb.get() != store_bb.get() { while bb.get() != store_bb.get() {
match bb.get_single_predecessor() { match bb.get_single_predecessor() {
......
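get_dominating_store changes only its receiver type; the search itself still walks single predecessors from the current block back toward the store's block, giving up at any join point. A toy model of that walk over an assumed predecessor map:

use std::collections::HashMap;

// Model: blocks are ids; preds maps a block to its single predecessor.
// A missing entry means zero or multiple predecessors, so we give up.
fn get_dominating_store(
    preds: &HashMap<u32, u32>,
    mut bb: u32,
    store_bb: u32,
) -> Option<&'static str> {
    let mut ret = Some("store");
    while bb != store_bb {
        match preds.get(&bb) {
            Some(&prev) => bb = prev,
            None => {
                ret = None;
                break;
            }
        }
    }
    ret
}

fn main() {
    let mut preds = HashMap::new();
    preds.insert(3, 2); // bb3's only predecessor is bb2
    preds.insert(2, 1);
    assert_eq!(get_dominating_store(&preds, 3, 1), Some("store"));
    assert_eq!(get_dominating_store(&preds, 5, 1), None); // unknown/join
}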