diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs index 12809171b743899f909a67299c16102b1faa6116..a1e32636980812b8ba7cd802ee5936cd71d3e0eb 100644 --- a/src/liballoc/heap.rs +++ b/src/liballoc/heap.rs @@ -127,6 +127,7 @@ pub fn usable_size(size: usize, align: usize) -> usize { pub const EMPTY: *mut () = 0x1 as *mut (); /// The allocator for unique pointers. +// This function must not unwind. If it does, MIR trans will fail. #[cfg(not(test))] #[lang = "exchange_malloc"] #[inline] diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index f3d4c17654dcc7a0f650ff0c849ad539eccd631a..d2b86ade7a2ab26fb283d5396321453224b5d1db 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -710,6 +710,7 @@ pub fn LLVMAppendBasicBlockInContext(C: ContextRef, // Operations on instructions pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef; + pub fn LLVMGetFirstBasicBlock(Fn: ValueRef) -> BasicBlockRef; pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef; pub fn LLVMInstructionEraseFromParent(Inst: ValueRef); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 0ac853e99eecdba47d0fc66dd9ca0fe2054a825c..8b4343af1990f53f0f8ee6901f08104d78343057 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -10,7 +10,6 @@ use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace}; use base; -use build::AllocaFcx; use common::{type_is_fat_ptr, BlockAndBuilder, C_uint}; use context::CrateContext; use cabi_x86; @@ -99,21 +98,11 @@ pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { self } - pub fn unset(&mut self, attr: ArgAttribute) -> &mut Self { - self.regular = self.regular - attr; - self - } - pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self { self.dereferenceable_bytes = bytes; self } - pub fn unset_dereferenceable(&mut self) -> &mut Self { - self.dereferenceable_bytes = 0; - self - } - pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { unsafe { self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); @@ -246,7 +235,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) { if self.is_ignore() { return; } - let ccx = bcx.ccx(); + let ccx = bcx.ccx; if self.is_indirect() { let llsz = llsize_of(ccx, self.ty); let llalign = llalign_of_min(ccx, self.ty); @@ -278,7 +267,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast"); + let llscratch = bcx.fcx().alloca(ty, "abi_cast"); base::Lifetime::Start.call(bcx, llscratch); // ...where we first store the value... @@ -431,7 +420,7 @@ pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let ret_ty = sig.output(); let mut ret = arg_of(ret_ty, true); - if !type_is_fat_ptr(ccx.tcx(), ret_ty) { + if !type_is_fat_ptr(ccx, ret_ty) { // The `noalias` attribute on the return value is useful to a // function ptr caller. 
if let ty::TyBox(_) = ret_ty.sty { @@ -496,7 +485,7 @@ pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, for ty in inputs.iter().chain(extra_args.iter()) { let mut arg = arg_of(ty, false); - if type_is_fat_ptr(ccx.tcx(), ty) { + if type_is_fat_ptr(ccx, ty) { let original_tys = arg.original_ty.field_types(); let sizing_tys = arg.ty.field_types(); assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2)); @@ -569,7 +558,7 @@ pub fn adjust_for_abi<'a, 'tcx>(&mut self, }; // Fat pointers are returned by-value. if !self.ret.is_ignore() { - if !type_is_fat_ptr(ccx.tcx(), sig.output()) { + if !type_is_fat_ptr(ccx, sig.output()) { fixup(&mut self.ret); } } diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index 9c82e2507737116939a63bdab2ae85ea534349c9..31a5538a3c1179f3126eb279fffbc59ae9427d37 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -48,9 +48,7 @@ use llvm::{ValueRef, True, IntEQ, IntNE}; use rustc::ty::layout; use rustc::ty::{self, Ty, AdtKind}; -use build::*; use common::*; -use debuginfo::DebugLoc; use glue; use base; use machine; @@ -295,7 +293,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec> sizing: bool, dst: bool) -> Vec { let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]); if sizing { - fields.filter(|ty| !dst || type_is_sized(cx.tcx(), *ty)) + fields.filter(|ty| !dst || cx.shared().type_is_sized(*ty)) .map(|ty| type_of::sizing_type_of(cx, ty)).collect() } else { fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect() @@ -304,12 +302,13 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec> /// Obtain a representation of the discriminant sufficient to translate /// destructuring; this may or may not involve the actual discriminant. -pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - scrutinee: ValueRef, - range_assert: bool) - -> (BranchKind, Option) { - let l = bcx.ccx().layout_of(t); +pub fn trans_switch<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, + scrutinee: ValueRef, + range_assert: bool +) -> (BranchKind, Option) { + let l = bcx.ccx.layout_of(t); match *l { layout::CEnum { .. } | layout::General { .. } | layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { @@ -331,34 +330,37 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { } /// Obtain the actual discriminant of a value. -pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, - scrutinee: ValueRef, cast_to: Option, - range_assert: bool) - -> ValueRef { +pub fn trans_get_discr<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, + scrutinee: ValueRef, + cast_to: Option, + range_assert: bool +) -> ValueRef { let (def, substs) = match t.sty { ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs), _ => bug!("{} is not an enum", t) }; debug!("trans_get_discr t: {:?}", t); - let l = bcx.ccx().layout_of(t); + let l = bcx.ccx.layout_of(t); let val = match *l { layout::CEnum { discr, min, max, .. } => { load_discr(bcx, discr, scrutinee, min, max, range_assert) } layout::General { discr, .. } => { - let ptr = StructGEP(bcx, scrutinee, 0); + let ptr = bcx.struct_gep(scrutinee, 0); load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1, range_assert) } - layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx(), 0), + layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), layout::RawNullablePointer { nndiscr, .. 
} => { let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - let llptrty = type_of::sizing_type_of(bcx.ccx(), - monomorphize::field_ty(bcx.ccx().tcx(), substs, + let llptrty = type_of::sizing_type_of(bcx.ccx, + monomorphize::field_ty(bcx.ccx.tcx(), substs, &def.variants[nndiscr as usize].fields[0])); - ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None) + bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty)) } layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee) @@ -367,24 +369,28 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, }; match cast_to { None => val, - Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) } + Some(llty) => if is_discr_signed(&l) { bcx.sext(val, llty) } else { bcx.zext(val, llty) } } } -fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath, - scrutinee: ValueRef) -> ValueRef { - let llptrptr = GEPi(bcx, scrutinee, +fn struct_wrapped_nullable_bitdiscr( + bcx: &BlockAndBuilder, + nndiscr: u64, + discrfield: &layout::FieldPath, + scrutinee: ValueRef +) -> ValueRef { + let llptrptr = bcx.gepi(scrutinee, &discrfield.iter().map(|f| *f as usize).collect::>()[..]); - let llptr = Load(bcx, llptrptr); + let llptr = bcx.load(llptrptr); let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None) + bcx.icmp(cmp, llptr, C_null(val_ty(llptr))) } /// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, +fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64, range_assert: bool) -> ValueRef { - let llty = Type::from_integer(bcx.ccx(), ity); + let llty = Type::from_integer(bcx.ccx, ity); assert_eq!(val_ty(ptr), llty.ptr_to()); let bits = ity.size().bits(); assert!(bits <= 64); @@ -397,11 +403,11 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6 // rejected by the LLVM verifier (it would mean either an // empty set, which is impossible, or the entire range of the // type, which is pointless). - Load(bcx, ptr) + bcx.load(ptr) } else { // llvm::ConstantRange can deal with ranges that wrap around, // so an overflow on (max + 1) is fine. - LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True) + bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True) } } @@ -409,18 +415,17 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6 /// discriminant-like value returned by `trans_switch`. /// /// This should ideally be less tightly tied to `_match`. -pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) - -> ValueRef { - let l = bcx.ccx().layout_of(t); +pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef { + let l = bcx.ccx.layout_of(t); match *l { layout::CEnum { discr, .. } | layout::General { discr, .. }=> { - C_integral(Type::from_integer(bcx.ccx(), discr), value.0, true) + C_integral(Type::from_integer(bcx.ccx, discr), value.0, true) } layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => { assert!(value == Disr(0) || value == Disr(1)); - C_bool(bcx.ccx(), value != Disr(0)) + C_bool(bcx.ccx, value != Disr(0)) } _ => { bug!("{} does not have a discriminant. 
Represented as {:#?}", t, l); @@ -430,18 +435,19 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr) /// Set the discriminant for a new value of the given case of the given /// representation. -pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, - val: ValueRef, to: Disr) { - let l = bcx.ccx().layout_of(t); +pub fn trans_set_discr<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr +) { + let l = bcx.ccx.layout_of(t); match *l { layout::CEnum{ discr, min, max, .. } => { assert_discr_in_range(Disr(min), Disr(max), to); - Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), + bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true), val); } layout::General{ discr, .. } => { - Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true), - StructGEP(bcx, val, 0)); + bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true), + bcx.struct_gep(val, 0)); } layout::Univariant { .. } | layout::UntaggedUnion { .. } @@ -449,10 +455,10 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, assert_eq!(to, Disr(0)); } layout::RawNullablePointer { nndiscr, .. } => { - let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; if to.0 != nndiscr { - let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty); - Store(bcx, C_null(llptrty), val); + let llptrty = type_of::sizing_type_of(bcx.ccx, nnty); + bcx.store(C_null(llptrty), val); } } layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { @@ -461,17 +467,16 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, // Issue #34427: As workaround for LLVM bug on // ARM, use memset of 0 on whole struct rather // than storing null to single target field. - let b = B(bcx); - let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to()); - let fill_byte = C_u8(b.ccx, 0); - let size = C_uint(b.ccx, nonnull.stride().bytes()); - let align = C_i32(b.ccx, nonnull.align.abi() as i32); - base::call_memset(&b, llptr, fill_byte, size, align, false); + let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to()); + let fill_byte = C_u8(bcx.ccx, 0); + let size = C_uint(bcx.ccx, nonnull.stride().bytes()); + let align = C_i32(bcx.ccx, nonnull.align.abi() as i32); + base::call_memset(bcx, llptr, fill_byte, size, align, false); } else { let path = discrfield.iter().map(|&i| i as usize).collect::>(); - let llptrptr = GEPi(bcx, val, &path[..]); + let llptrptr = bcx.gepi(val, &path[..]); let llptrty = val_ty(llptrptr).element_type(); - Store(bcx, C_null(llptrty), llptrptr); + bcx.store(C_null(llptrty), llptrptr); } } } @@ -479,7 +484,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, } } -fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool { +fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool { bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" } @@ -492,19 +497,15 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) { } /// Access a field, at a point when the value's case is known. -pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, - val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { - trans_field_ptr_builder(&bcx.build(), t, val, discr, ix) -} - -/// Access a field, at a point when the value's case is known. 
-pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - t: Ty<'tcx>, - val: MaybeSizedValue, - discr: Disr, ix: usize) - -> ValueRef { - let l = bcx.ccx().layout_of(t); - debug!("trans_field_ptr_builder on {} represented as {:#?}", t, l); +pub fn trans_field_ptr<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, + val: MaybeSizedValue, + discr: Disr, + ix: usize +) -> ValueRef { + let l = bcx.ccx.layout_of(t); + debug!("trans_field_ptr on {} represented as {:#?}", t, l); // Note: if this ever needs to generate conditionals (e.g., if we // decide to do some kind of cdr-coding-like non-unique repr // someday), it will need to return a possibly-new bcx as well. @@ -512,7 +513,7 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, layout::Univariant { ref variant, .. } => { assert_eq!(discr, Disr(0)); struct_field_ptr(bcx, &variant, - &compute_fields(bcx.ccx(), t, 0, false), + &compute_fields(bcx.ccx, t, 0, false), val, ix, false) } layout::Vector { count, .. } => { @@ -521,57 +522,53 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, bcx.struct_gep(val.value, ix) } layout::General { discr: d, ref variants, .. } => { - let mut fields = compute_fields(bcx.ccx(), t, discr.0 as usize, false); - fields.insert(0, d.to_ty(&bcx.ccx().tcx(), false)); + let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false); + fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false)); struct_field_ptr(bcx, &variants[discr.0 as usize], &fields, val, ix + 1, true) } layout::UntaggedUnion { .. } => { - let fields = compute_fields(bcx.ccx(), t, 0, false); - let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]); - if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + let fields = compute_fields(bcx.ccx, t, 0, false); + let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); bcx.pointercast(val.value, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } | layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => { - let nullfields = compute_fields(bcx.ccx(), t, (1-nndiscr) as usize, false); + let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); // The unit-like case might have a nonzero number of unit-like fields. // (e.d., Result of Either with (), as one side.) - let ty = type_of::type_of(bcx.ccx(), nullfields[ix]); - assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); - // The contents of memory at this pointer can't matter, but use - // the value that's "reasonable" in case of pointer comparison. - if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + let ty = type_of::type_of(bcx.ccx, nullfields[ix]); + assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); bcx.pointercast(val.value, ty.ptr_to()) } layout::RawNullablePointer { nndiscr, .. } => { - let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0]; + let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; assert_eq!(ix, 0); assert_eq!(discr.0, nndiscr); - let ty = type_of::type_of(bcx.ccx(), nnty); - if bcx.is_unreachable() { return C_undef(ty.ptr_to()); } + let ty = type_of::type_of(bcx.ccx, nnty); bcx.pointercast(val.value, ty.ptr_to()) } layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. 
} => { assert_eq!(discr.0, nndiscr); struct_field_ptr(bcx, &nonnull, - &compute_fields(bcx.ccx(), t, discr.0 as usize, false), + &compute_fields(bcx.ccx, t, discr.0 as usize, false), val, ix, false) } _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) } } -fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - st: &layout::Struct, fields: &Vec>, val: MaybeSizedValue, - ix: usize, needs_cast: bool) -> ValueRef { +fn struct_field_ptr<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + st: &layout::Struct, + fields: &Vec>, + val: MaybeSizedValue, + ix: usize, + needs_cast: bool +) -> ValueRef { let fty = fields[ix]; - let ccx = bcx.ccx(); - let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); - if bcx.is_unreachable() { - return C_undef(ll_fty.ptr_to()); - } + let ccx = bcx.ccx; let ptr_val = if needs_cast { let fields = st.field_index_by_increasing_offset().map(|i| { @@ -587,7 +584,8 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // * First field - Always aligned properly // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already - if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || type_is_sized(bcx.tcx(), fty) { + if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || + bcx.ccx.shared().type_is_sized(fty) { return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize); } @@ -607,8 +605,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, return bcx.struct_gep(ptr_val, ix); } - let dbloc = DebugLoc::None; - // We need to get the pointer manually now. // We do this by casting to a *i8, then offsetting it by the appropriate amount. // We do this instead of, say, simply adjusting the pointer from the result of a GEP @@ -628,7 +624,7 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let offset = st.offsets[ix].bytes(); - let unaligned_offset = C_uint(bcx.ccx(), offset); + let unaligned_offset = C_uint(bcx.ccx, offset); // Get the alignment of the field let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); @@ -639,19 +635,18 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // (unaligned offset + (align - 1)) & -align // Calculate offset - dbloc.apply(bcx.fcx()); - let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64)); + let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64)); let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), bcx.neg(align)); debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); // Cast and adjust pointer - let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx())); + let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty); + let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); debug!("struct_field_ptr: Field type is {:?}", ll_fty); bcx.pointercast(byte_ptr, ll_fty.ptr_to()) } diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 665e12cbe87955e624cfeb2ef7e5b03e297718db..d6385e1ca156263994ec0ef4d35c030b13a23416 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -12,7 +12,6 @@ use llvm::{self, ValueRef}; use base; -use build::*; use common::*; use type_of; use type_::Type; @@ -25,10 +24,12 @@ use libc::{c_uint, c_char}; // Take an inline assembly expression and splat it out via LLVM -pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ia: 
&hir::InlineAsm, - outputs: Vec<(ValueRef, Ty<'tcx>)>, - mut inputs: Vec) { +pub fn trans_inline_asm<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + ia: &hir::InlineAsm, + outputs: Vec<(ValueRef, Ty<'tcx>)>, + mut inputs: Vec +) { let mut ext_constraints = vec![]; let mut output_types = vec![]; @@ -47,7 +48,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, if out.is_indirect { indirect_outputs.push(val.unwrap()); } else { - output_types.push(type_of::type_of(bcx.ccx(), ty)); + output_types.push(type_of::type_of(bcx.ccx, ty)); } } if !indirect_outputs.is_empty() { @@ -78,9 +79,9 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => Type::void(bcx.ccx()), + 0 => Type::void(bcx.ccx), 1 => output_types[0], - _ => Type::struct_(bcx.ccx(), &output_types[..], false) + _ => Type::struct_(bcx.ccx, &output_types[..], false) }; let dialect = match ia.dialect { @@ -90,32 +91,33 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = InlineAsmCall(bcx, - asm.as_ptr(), - constraint_cstr.as_ptr(), - &inputs, - output_type, - ia.volatile, - ia.alignstack, - dialect); + let r = bcx.inline_asm_call( + asm.as_ptr(), + constraint_cstr.as_ptr(), + &inputs, + output_type, + ia.volatile, + ia.alignstack, + dialect + ); // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); for (i, (_, &(val, _))) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) }; - Store(bcx, v, val); + let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) }; + bcx.store(v, val); } // Store expn_id in a metadata node so we can map LLVM errors // back to source locations. See #17552. unsafe { let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(), + let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx.llcx(), key.as_ptr() as *const c_char, key.len() as c_uint); - let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32); + let val: llvm::ValueRef = C_i32(bcx.ccx, ia.expn_id.into_u32() as i32); llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1)); + llvm::LLVMMDNodeInContext(bcx.ccx.llcx(), &val, 1)); } } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index c7f21427a0ceb656c5e6731acbc635dd25ba80cb..76bb1c56af3818aa155be237b4bb5da7292aded4 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -23,8 +23,6 @@ //! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int, //! int) and rec(x=int, y=int, z=int) will have the same TypeRef. 
-#![allow(non_camel_case_types)] - use super::CrateTranslation; use super::ModuleLlvm; use super::ModuleSource; @@ -37,7 +35,7 @@ use llvm::{Linkage, ValueRef, Vector, get_param}; use llvm; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; -use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem}; +use middle::lang_items::StartFnLangItem; use rustc::ty::subst::Substs; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; @@ -51,20 +49,18 @@ use abi::{self, Abi, FnType}; use adt; use attributes; -use build::*; -use builder::{Builder, noname}; +use builder::Builder; use callee::{Callee}; -use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint}; +use common::{BlockAndBuilder, C_bool, C_bytes_in_context, C_i32, C_uint}; use collector::{self, TransItemCollectionMode}; -use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}; +use common::{C_struct_in_context, C_u64, C_undef}; use common::{CrateContext, FunctionContext}; -use common::{Result}; use common::{fulfill_obligation}; use common::{type_is_zero_size, val_ty}; use common; use consts; use context::{SharedCrateContext, CrateContextList}; -use debuginfo::{self, DebugLoc}; +use debuginfo; use declare; use machine; use machine::{llalign_of_min, llsize_of}; @@ -81,11 +77,8 @@ use Disr; use util::nodemap::{NodeSet, FxHashMap, FxHashSet}; -use arena::TypedArena; use libc::c_uint; use std::ffi::{CStr, CString}; -use std::cell::{Cell, RefCell}; -use std::ptr; use std::rc::Rc; use std::str; use std::i32; @@ -95,52 +88,6 @@ use rustc::ty::layout::{self, Layout}; use syntax::ast; -thread_local! { - static TASK_LOCAL_INSN_KEY: RefCell>> = { - RefCell::new(None) - } -} - -pub fn with_insn_ctxt(blk: F) - where F: FnOnce(&[&'static str]) -{ - TASK_LOCAL_INSN_KEY.with(move |slot| { - slot.borrow().as_ref().map(move |s| blk(s)); - }) -} - -pub fn init_insn_ctxt() { - TASK_LOCAL_INSN_KEY.with(|slot| { - *slot.borrow_mut() = Some(Vec::new()); - }); -} - -pub struct _InsnCtxt { - _cannot_construct_outside_of_this_module: (), -} - -impl Drop for _InsnCtxt { - fn drop(&mut self) { - TASK_LOCAL_INSN_KEY.with(|slot| { - if let Some(ctx) = slot.borrow_mut().as_mut() { - ctx.pop(); - } - }) - } -} - -pub fn push_ctxt(s: &'static str) -> _InsnCtxt { - debug!("new InsnCtxt: {}", s); - TASK_LOCAL_INSN_KEY.with(|slot| { - if let Some(ctx) = slot.borrow_mut().as_mut() { - ctx.push(s) - } - }); - _InsnCtxt { - _cannot_construct_outside_of_this_module: (), - } -} - pub struct StatRecorder<'a, 'tcx: 'a> { ccx: &'a CrateContext<'a, 'tcx>, name: Option, @@ -162,10 +109,7 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> { fn drop(&mut self) { if self.ccx.sess().trans_stats() { let iend = self.ccx.stats().n_llvm_insns.get(); - self.ccx - .stats() - .fn_stats - .borrow_mut() + self.ccx.stats().fn_stats.borrow_mut() .push((self.name.take().unwrap(), iend - self.istart)); self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1); // Reset LLVM insn count to avoid compound costs. 
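
StatRecorder, shown above, is the usual RAII counter pattern: snapshot a counter at construction and record the delta in Drop. A self-contained sketch of the same shape, with simplified stand-ins for the compiler's stats types:

use std::cell::{Cell, RefCell};

struct Stats {
    n_llvm_insns: Cell<u64>,
    fn_stats: RefCell<Vec<(String, u64)>>,
}

struct StatRecorder<'a> {
    stats: &'a Stats,
    name: Option<String>,
    istart: u64,
}

impl<'a> StatRecorder<'a> {
    fn new(stats: &'a Stats, name: String) -> Self {
        StatRecorder { stats: stats, name: Some(name), istart: stats.n_llvm_insns.get() }
    }
}

impl<'a> Drop for StatRecorder<'a> {
    fn drop(&mut self) {
        // Record how many "instructions" were emitted while the guard was alive.
        let iend = self.stats.n_llvm_insns.get();
        self.stats.fn_stats.borrow_mut()
            .push((self.name.take().unwrap(), iend - self.istart));
    }
}

fn main() {
    let stats = Stats { n_llvm_insns: Cell::new(0), fn_stats: RefCell::new(Vec::new()) };
    {
        let _r = StatRecorder::new(&stats, "example_fn".to_string());
        stats.n_llvm_insns.set(stats.n_llvm_insns.get() + 42); // pretend codegen happened
    }
    assert_eq!(stats.fn_stats.borrow()[0], ("example_fn".to_string(), 42));
}
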
@@ -174,52 +118,14 @@ fn drop(&mut self) { } } -pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA) -} - -pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef { - StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR) +pub fn get_meta(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { + bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) } -pub fn get_meta_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { - b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) +pub fn get_dataptr(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { + bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) } -pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef { - b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) -} - -fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId { - match bcx.tcx().lang_items.require(it) { - Ok(id) => id, - Err(s) => { - bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s)); - } - } -} - -// The following malloc_raw_dyn* functions allocate a box to contain -// a given type, but with a potentially dynamic size. - -pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llty_ptr: Type, - info_ty: Ty<'tcx>, - size: ValueRef, - align: ValueRef, - debug_loc: DebugLoc) - -> Result<'blk, 'tcx> { - let _icx = push_ctxt("malloc_raw_exchange"); - - // Allocate space: - let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem); - let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) - .call(bcx, debug_loc, &[size, align], None); - - Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr)) -} - - pub fn bin_op_to_icmp_predicate(op: hir::BinOp_, signed: bool) -> llvm::IntPredicate { @@ -254,18 +160,18 @@ pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate { } } -pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - t: Ty<'tcx>, - ret_ty: Type, - op: hir::BinOp_, - debug_loc: DebugLoc) - -> ValueRef { +pub fn compare_simd_types<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + lhs: ValueRef, + rhs: ValueRef, + t: Ty<'tcx>, + ret_ty: Type, + op: hir::BinOp_ +) -> ValueRef { let signed = match t.sty { ty::TyFloat(_) => { let cmp = bin_op_to_fcmp_predicate(op); - return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty); + return bcx.sext(bcx.fcmp(cmp, lhs, rhs), ret_ty); }, ty::TyUint(_) => false, ty::TyInt(_) => true, @@ -277,7 +183,7 @@ pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // to get the correctly sized type. This will compile to a single instruction // once the IR is converted to assembly if the SIMD instruction is supported // by the target architecture. - SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty) + bcx.sext(bcx.icmp(cmp, lhs, rhs), ret_ty) } /// Retrieve the information we are losing (making dynamic) in an unsizing @@ -311,11 +217,12 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, } /// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
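
unsized_info and unsize_thin_ptr implement the pointer half of coercions such as &[i32; 3] to &[i32] or &T to &dyn Trait: the thin pointer is kept and an extra word (a length or a vtable) is attached. The effect is visible in ordinary Rust; the two-word fat-pointer sizes below are checked at run time rather than assumed:

use std::fmt::Debug;
use std::mem::size_of;

fn main() {
    // Thin pointer: one word.
    assert_eq!(size_of::<&i32>(), size_of::<usize>());
    // Unsized coercions add the "info" word unsized_info computes:
    // a length for slices, a vtable pointer for trait objects.
    assert_eq!(size_of::<&[i32]>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&dyn Debug>(), 2 * size_of::<usize>());

    let array = [1, 2, 3];
    let slice: &[i32] = &array;          // &[i32; 3] -> &[i32]
    assert_eq!(slice.len(), 3);          // the attached length word
    let dyn_ref: &dyn Debug = &array[0]; // &i32 -> &dyn Debug, attaches a vtable
    println!("{:?}", dyn_ref);
}
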
-pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - src: ValueRef, - src_ty: Ty<'tcx>, - dst_ty: Ty<'tcx>) - -> (ValueRef, ValueRef) { +pub fn unsize_thin_ptr<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + src: ValueRef, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx> +) -> (ValueRef, ValueRef) { debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); match (&src_ty.sty, &dst_ty.sty) { (&ty::TyBox(a), &ty::TyBox(b)) | @@ -325,10 +232,9 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) | (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => { - assert!(common::type_is_sized(bcx.tcx(), a)); - let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to(); - (PointerCast(bcx, src, ptr_ty), - unsized_info(bcx.ccx(), a, b, None)) + assert!(bcx.ccx.shared().type_is_sized(a)); + let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to(); + (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } _ => bug!("unsize_thin_ptr: called on bad types"), } @@ -336,24 +242,24 @@ pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - src: ValueRef, - src_ty: Ty<'tcx>, - dst: ValueRef, - dst_ty: Ty<'tcx>) { +pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + src: ValueRef, + src_ty: Ty<'tcx>, + dst: ValueRef, + dst_ty: Ty<'tcx>) { match (&src_ty.sty, &dst_ty.sty) { (&ty::TyBox(..), &ty::TyBox(..)) | (&ty::TyRef(..), &ty::TyRef(..)) | (&ty::TyRef(..), &ty::TyRawPtr(..)) | (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => { - let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) { + let (base, info) = if common::type_is_fat_ptr(bcx.ccx, src_ty) { // fat-ptr to fat-ptr unsize preserves the vtable // i.e. &'a fmt::Debug+Send => &'a fmt::Debug // So we need to pointercast the base to ensure // the types match up. 
let (base, info) = load_fat_ptr(bcx, src, src_ty); - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), dst_ty); - let base = PointerCast(bcx, base, llcast_ty); + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty); + let base = bcx.pointercast(base, llcast_ty); (base, info) } else { let base = load_ty(bcx, src, src_ty); @@ -377,7 +283,7 @@ pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let iter = src_fields.zip(dst_fields).enumerate(); for (i, (src_fty, dst_fty)) in iter { - if type_is_zero_size(bcx.ccx(), dst_fty) { + if type_is_zero_size(bcx.ccx, dst_fty) { continue; } @@ -415,8 +321,10 @@ pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx } } -pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { - cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b)) +pub fn cast_shift_expr_rhs( + cx: &BlockAndBuilder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef +) -> ValueRef { + cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) } pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef { @@ -462,42 +370,6 @@ fn cast_shift_rhs(op: hir::BinOp_, } } -pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llfn: ValueRef, - llargs: &[ValueRef], - debug_loc: DebugLoc) - -> (ValueRef, Block<'blk, 'tcx>) { - let _icx = push_ctxt("invoke_"); - if bcx.unreachable.get() { - return (C_null(Type::i8(bcx.ccx())), bcx); - } - - if need_invoke(bcx) { - debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb); - for &llarg in llargs { - debug!("arg: {:?}", Value(llarg)); - } - let normal_bcx = bcx.fcx.new_block("normal-return"); - let landing_pad = bcx.fcx.get_landing_pad(); - - let llresult = Invoke(bcx, - llfn, - &llargs[..], - normal_bcx.llbb, - landing_pad, - debug_loc); - return (llresult, normal_bcx); - } else { - debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb); - for &llarg in llargs { - debug!("arg: {:?}", Value(llarg)); - } - - let llresult = Call(bcx, llfn, &llargs[..], debug_loc); - return (llresult, bcx); - } -} - /// Returns whether this session's target will use SEH-based unwinding. /// /// This is only true for MSVC targets, and even then the 64-bit MSVC target @@ -507,18 +379,6 @@ pub fn wants_msvc_seh(sess: &Session) -> bool { sess.target.target.options.is_like_msvc } -pub fn avoid_invoke(bcx: Block) -> bool { - bcx.sess().no_landing_pads() || bcx.lpad().is_some() -} - -pub fn need_invoke(bcx: Block) -> bool { - if avoid_invoke(bcx) { - false - } else { - bcx.fcx.needs_invoke() - } -} - pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { let assume_intrinsic = b.ccx.get_intrinsic("llvm.assume"); b.call(assume_intrinsic, &[val], None); @@ -527,14 +387,7 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { /// Helper for loading values from memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. Also handles various special cases where the type /// gives us better information about what we are loading. 
-pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { - if cx.unreachable.get() { - return C_undef(type_of::type_of(cx.ccx(), t)); - } - load_ty_builder(&B(cx), ptr, t) -} - -pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { +pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef { let ccx = b.ccx; if type_is_zero_size(ccx, t) { return C_undef(type_of::type_of(ccx, t)); @@ -559,8 +412,7 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc // a char is a Unicode codepoint, and so takes values from 0 // to 0x10FFFF inclusive only. b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False) - } else if (t.is_region_ptr() || t.is_unique()) && - !common::type_is_fat_ptr(ccx.tcx(), t) { + } else if (t.is_region_ptr() || t.is_unique()) && !common::type_is_fat_ptr(ccx, t) { b.load_nonnull(ptr) } else { b.load(ptr) @@ -569,54 +421,32 @@ pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tc /// Helper for storing values in memory. Does the necessary conversion if the in-memory type /// differs from the type used for SSA values. -pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { - return; - } - +pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) { debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); - if common::type_is_fat_ptr(cx.tcx(), t) { - let lladdr = ExtractValue(cx, v, abi::FAT_PTR_ADDR); - let llextra = ExtractValue(cx, v, abi::FAT_PTR_EXTRA); + if common::type_is_fat_ptr(cx.ccx, t) { + let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR); + let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA); store_fat_ptr(cx, lladdr, llextra, dst, t); } else { - Store(cx, from_immediate(cx, v), dst); + cx.store(from_immediate(cx, v), dst); } } -pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - data: ValueRef, - extra: ValueRef, - dst: ValueRef, - _ty: Ty<'tcx>) { +pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, + data: ValueRef, + extra: ValueRef, + dst: ValueRef, + _ty: Ty<'tcx>) { // FIXME: emit metadata - Store(cx, data, get_dataptr(cx, dst)); - Store(cx, extra, get_meta(cx, dst)); + cx.store(data, get_dataptr(cx, dst)); + cx.store(extra, get_meta(cx, dst)); } -pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - src: ValueRef, - ty: Ty<'tcx>) - -> (ValueRef, ValueRef) -{ - if cx.unreachable.get() { - // FIXME: remove me - return (Load(cx, get_dataptr(cx, src)), - Load(cx, get_meta(cx, src))); - } - - load_fat_ptr_builder(&B(cx), src, ty) -} - -pub fn load_fat_ptr_builder<'a, 'tcx>( - b: &Builder<'a, 'tcx>, - src: ValueRef, - t: Ty<'tcx>) - -> (ValueRef, ValueRef) -{ - - let ptr = get_dataptr_builder(b, src); +pub fn load_fat_ptr<'a, 'tcx>( + b: &Builder<'a, 'tcx>, src: ValueRef, t: Ty<'tcx> +) -> (ValueRef, ValueRef) { + let ptr = get_dataptr(b, src); let ptr = if t.is_region_ptr() || t.is_unique() { b.load_nonnull(ptr) } else { @@ -624,122 +454,63 @@ pub fn load_fat_ptr_builder<'a, 'tcx>( }; // FIXME: emit metadata on `meta`. 
- let meta = b.load(get_meta_builder(b, src)); + let meta = b.load(get_meta(b, src)); (ptr, meta) } -pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef { - if val_ty(val) == Type::i1(bcx.ccx()) { - ZExt(bcx, val, Type::i8(bcx.ccx())) +pub fn from_immediate(bcx: &BlockAndBuilder, val: ValueRef) -> ValueRef { + if val_ty(val) == Type::i1(bcx.ccx) { + bcx.zext(val, Type::i8(bcx.ccx)) } else { val } } -pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef { +pub fn to_immediate(bcx: &BlockAndBuilder, val: ValueRef, ty: Ty) -> ValueRef { if ty.is_bool() { - Trunc(bcx, val, Type::i1(bcx.ccx())) + bcx.trunc(val, Type::i1(bcx.ccx)) } else { val } } -pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx> - where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> -{ - let _icx = push_ctxt("with_cond"); - - if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) { - return bcx; - } - - let fcx = bcx.fcx; - let next_cx = fcx.new_block("next"); - let cond_cx = fcx.new_block("cond"); - CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None); - let after_cx = f(cond_cx); - if !after_cx.terminated.get() { - Br(after_cx, next_cx.llbb, DebugLoc::None); - } - next_cx -} - pub enum Lifetime { Start, End } -// If LLVM lifetime intrinsic support is enabled (i.e. optimizations -// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` -// and the intrinsic for `lt` and passes them to `emit`, which is in -// charge of generating code to call the passed intrinsic on whatever -// block of generated code is targetted for the intrinsic. -// -// If LLVM lifetime intrinsic support is disabled (i.e. optimizations -// off) or `ptr` is zero-sized, then no-op (does not call `emit`). -fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>, - ptr: ValueRef, - lt: Lifetime, - emit: F) - where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef) -{ - if ccx.sess().opts.optimize == config::OptLevel::No { - return; - } - - let _icx = push_ctxt(match lt { - Lifetime::Start => "lifetime_start", - Lifetime::End => "lifetime_end" - }); - - let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()); - if size == 0 { - return; - } - - let lifetime_intrinsic = ccx.get_intrinsic(match lt { - Lifetime::Start => "llvm.lifetime.start", - Lifetime::End => "llvm.lifetime.end" - }); - emit(ccx, size, lifetime_intrinsic) -} - impl Lifetime { + // If LLVM lifetime intrinsic support is enabled (i.e. optimizations + // on), and `ptr` is nonzero-sized, then extracts the size of `ptr` + // and the intrinsic for `lt` and passes them to `emit`, which is in + // charge of generating code to call the passed intrinsic on whatever + // block of generated code is targetted for the intrinsic. + // + // If LLVM lifetime intrinsic support is disabled (i.e. optimizations + // off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
pub fn call(self, b: &Builder, ptr: ValueRef) { - core_lifetime_emit(b.ccx, ptr, self, |ccx, size, lifetime_intrinsic| { - let ptr = b.pointercast(ptr, Type::i8p(ccx)); - b.call(lifetime_intrinsic, &[C_u64(ccx, size), ptr], None); - }); - } -} + if b.ccx.sess().opts.optimize == config::OptLevel::No { + return; + } -pub fn call_lifetime_start(bcx: Block, ptr: ValueRef) { - if !bcx.unreachable.get() { - Lifetime::Start.call(&bcx.build(), ptr); - } -} + let size = machine::llsize_of_alloc(b.ccx, val_ty(ptr).element_type()); + if size == 0 { + return; + } -pub fn call_lifetime_end(bcx: Block, ptr: ValueRef) { - if !bcx.unreachable.get() { - Lifetime::End.call(&bcx.build(), ptr); - } -} + let lifetime_intrinsic = b.ccx.get_intrinsic(match self { + Lifetime::Start => "llvm.lifetime.start", + Lifetime::End => "llvm.lifetime.end" + }); -// Generates code for resumption of unwind at the end of a landing pad. -pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) { - if !bcx.sess().target.target.options.custom_unwind_resume { - Resume(bcx, lpval); - } else { - let exc_ptr = ExtractValue(bcx, lpval, 0); - bcx.fcx.eh_unwind_resume() - .call(bcx, DebugLoc::None, &[exc_ptr], None); + let ptr = b.pointercast(ptr, Type::i8p(b.ccx)); + b.call(lifetime_intrinsic, &[C_u64(b.ccx, size), ptr], None); } } -pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, +pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) { - let _icx = push_ctxt("call_memcpy"); let ccx = b.ccx; let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); @@ -752,11 +523,12 @@ pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } -pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) { - let _icx = push_ctxt("memcpy_ty"); - let ccx = bcx.ccx(); +pub fn memcpy_ty<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx> +) { + let ccx = bcx.ccx; - if type_is_zero_size(ccx, t) || bcx.unreachable.get() { + if type_is_zero_size(ccx, t) { return; } @@ -764,8 +536,8 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe let llty = type_of::type_of(ccx, t); let llsz = llsize_of(ccx, llty); let llalign = type_of::align_of(ccx, t); - call_memcpy(&B(bcx), dst, src, llsz, llalign as u32); - } else if common::type_is_fat_ptr(bcx.tcx(), t) { + call_memcpy(bcx, dst, src, llsz, llalign as u32); + } else if common::type_is_fat_ptr(bcx.ccx, t) { let (data, extra) = load_fat_ptr(bcx, src, t); store_fat_ptr(bcx, data, extra, dst, t); } else { @@ -773,234 +545,22 @@ pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRe } } -pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) { - if cx.unreachable.get() { - return; - } - let _icx = push_ctxt("init_zero_mem"); - let bcx = cx; - memfill(&B(bcx), llptr, t, 0); -} - -// Always use this function instead of storing a constant byte to the memory -// in question. e.g. if you store a zero constant, LLVM will drown in vreg -// allocation for large data structures, and the generated code will be -// awful. (A telltale sign of this is large quantities of -// `mov [byte ptr foo],0` in the generated code.) 
-fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) { - let _icx = push_ctxt("memfill"); - let ccx = b.ccx; - let llty = type_of::type_of(ccx, ty); - let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to()); - let llzeroval = C_u8(ccx, byte); - let size = machine::llsize_of(ccx, llty); - let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); - call_memset(b, llptr, llzeroval, size, align, false); -} - -pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>, +pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, fill_byte: ValueRef, size: ValueRef, align: ValueRef, - volatile: bool) { - let ccx = b.ccx; - let ptr_width = &ccx.sess().target.target.target_pointer_width[..]; + volatile: bool) -> ValueRef { + let ptr_width = &b.ccx.sess().target.target.target_pointer_width[..]; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key); - let volatile = C_bool(ccx, volatile); - b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); + let llintrinsicfn = b.ccx.get_intrinsic(&intrinsic_key); + let volatile = C_bool(b.ccx, volatile); + b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) } -pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ty: Ty<'tcx>, - name: &str) -> ValueRef { +pub fn alloc_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef { assert!(!ty.has_param_types()); - alloca(bcx, type_of::type_of(bcx.ccx(), ty), name) -} - -pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef { - let _icx = push_ctxt("alloca"); - if cx.unreachable.get() { - unsafe { - return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); - } - } - DebugLoc::None.apply(cx.fcx); - let result = Alloca(cx, ty, name); - debug!("alloca({:?}) = {:?}", name, result); - result -} - -impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { - /// Create a function context for the given function. - /// Beware that you must call `fcx.init` or `fcx.bind_args` - /// before doing anything with the returned function context. 
- pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>, - llfndecl: ValueRef, - fn_ty: FnType, - definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>, - block_arena: &'blk TypedArena>) - -> FunctionContext<'blk, 'tcx> { - let (param_substs, def_id) = match definition { - Some((instance, ..)) => { - common::validate_substs(instance.substs); - (instance.substs, Some(instance.def)) - } - None => (ccx.tcx().intern_substs(&[]), None) - }; - - let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id)); - - debug!("FunctionContext::new({})", - definition.map_or(String::new(), |d| d.0.to_string())); - - let no_debug = if let Some(id) = local_id { - ccx.tcx().map.attrs(id) - .iter().any(|item| item.check_name("no_debug")) - } else if let Some(def_id) = def_id { - ccx.sess().cstore.item_attrs(def_id) - .iter().any(|item| item.check_name("no_debug")) - } else { - false - }; - - let mir = def_id.map(|id| ccx.tcx().item_mir(id)); - - let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) = - (no_debug, definition, &mir) { - debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir) - } else { - debuginfo::empty_function_debug_context(ccx) - }; - - FunctionContext { - mir: mir, - llfn: llfndecl, - llretslotptr: Cell::new(None), - param_env: ccx.tcx().empty_parameter_environment(), - alloca_insert_pt: Cell::new(None), - landingpad_alloca: Cell::new(None), - fn_ty: fn_ty, - param_substs: param_substs, - span: None, - block_arena: block_arena, - lpad_arena: TypedArena::new(), - ccx: ccx, - debug_context: debug_context, - scopes: RefCell::new(Vec::new()), - } - } - - /// Performs setup on a newly created function, creating the entry - /// scope block and allocating space for the return pointer. - pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> { - let entry_bcx = self.new_block("entry-block"); - - // Use a dummy instruction as the insertion point for all allocas. - // This is later removed in FunctionContext::cleanup. - self.alloca_insert_pt.set(Some(unsafe { - Load(entry_bcx, C_null(Type::i8p(self.ccx))); - llvm::LLVMGetFirstInstruction(entry_bcx.llbb) - })); - - if !self.fn_ty.ret.is_ignore() && !skip_retptr { - // We normally allocate the llretslotptr, unless we - // have been instructed to skip it for immediate return - // values, or there is nothing to return at all. - - // We create an alloca to hold a pointer of type `ret.original_ty` - // which will hold the pointer to the right alloca which has the - // final ret value - let llty = self.fn_ty.ret.memory_ty(self.ccx); - // But if there are no nested returns, we skip the indirection - // and have a single retslot - let slot = if self.fn_ty.ret.is_indirect() { - get_param(self.llfn, 0) - } else { - AllocaFcx(self, llty, "sret_slot") - }; - - self.llretslotptr.set(Some(slot)); - } - - entry_bcx - } - - /// Ties up the llstaticallocas -> llloadenv -> lltop edges, - /// and builds the return block. - pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>, - ret_debug_loc: DebugLoc) { - let _icx = push_ctxt("FunctionContext::finish"); - - self.build_return_block(ret_cx, ret_debug_loc); - - DebugLoc::None.apply(self); - self.cleanup(); - } - - // Builds the return block for a function. 
- pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>, - ret_debug_location: DebugLoc) { - if self.llretslotptr.get().is_none() || - ret_cx.unreachable.get() || - self.fn_ty.ret.is_indirect() { - return RetVoid(ret_cx, ret_debug_location); - } - - let retslot = self.llretslotptr.get().unwrap(); - let retptr = Value(retslot); - let llty = self.fn_ty.ret.original_ty; - match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) { - // If there's only a single store to the ret slot, we can directly return - // the value that was stored and omit the store and the alloca. - // However, we only want to do this when there is no cast needed. - (Some(s), None) => { - let mut retval = s.get_operand(0).unwrap().get(); - s.erase_from_parent(); - - if retptr.has_no_uses() { - retptr.erase_from_parent(); - } - - if self.fn_ty.ret.is_indirect() { - Store(ret_cx, retval, get_param(self.llfn, 0)); - RetVoid(ret_cx, ret_debug_location) - } else { - if llty == Type::i1(self.ccx) { - retval = Trunc(ret_cx, retval, llty); - } - Ret(ret_cx, retval, ret_debug_location) - } - } - (_, cast_ty) if self.fn_ty.ret.is_indirect() => { - // Otherwise, copy the return value to the ret slot. - assert_eq!(cast_ty, None); - let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty); - let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); - call_memcpy(&B(ret_cx), get_param(self.llfn, 0), - retslot, llsz, llalign as u32); - RetVoid(ret_cx, ret_debug_location) - } - (_, Some(cast_ty)) => { - let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to())); - let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty); - unsafe { - llvm::LLVMSetAlignment(load, llalign); - } - Ret(ret_cx, load, ret_debug_location) - } - (_, None) => { - let retval = if llty == Type::i1(self.ccx) { - let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False); - Trunc(ret_cx, val, llty) - } else { - Load(ret_cx, retslot) - }; - Ret(ret_cx, retval, ret_debug_location) - } - } - } + bcx.fcx().alloca(type_of::type_of(bcx.ccx, ty), name) } pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) { @@ -1018,8 +578,6 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance // release builds. 
info!("trans_instance({})", instance); - let _icx = push_ctxt("trans_instance"); - let fn_ty = ccx.tcx().item_type(instance.def); let fn_ty = ccx.tcx().erase_regions(&fn_ty); let fn_ty = monomorphize::apply_param_substs(ccx.shared(), instance.substs, &fn_ty); @@ -1040,19 +598,9 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, - lldecl, - fn_ty, - Some((instance, &sig, abi)), - &arena); - - if fcx.mir.is_none() { - bug!("attempted translation of `{}` w/o MIR", instance); - } - - mir::trans_mir(&fcx); + let fcx = FunctionContext::new(ccx, lldecl); + let mir = ccx.tcx().item_mir(instance.def); + mir::trans_mir(&fcx, fn_ty, &mir, instance, &sig, abi); } pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, @@ -1069,34 +617,55 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig()); let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena); - let bcx = fcx.init(false); - - if !fcx.fn_ty.ret.is_ignore() { - let dest = fcx.llretslotptr.get().unwrap(); + let fcx = FunctionContext::new(ccx, llfndecl); + let bcx = fcx.get_entry_block(); + if !fn_ty.ret.is_ignore() { + // But if there are no nested returns, we skip the indirection + // and have a single retslot + let dest = if fn_ty.ret.is_indirect() { + get_param(fcx.llfn, 0) + } else { + // We create an alloca to hold a pointer of type `ret.original_ty` + // which will hold the pointer to the right alloca which has the + // final ret value + fcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot") + }; let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value - let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; + let mut llarg_idx = fn_ty.ret.is_indirect() as usize; let mut arg_idx = 0; for (i, arg_ty) in sig.inputs().iter().enumerate() { - let lldestptr = adt::trans_field_ptr(bcx, sig.output(), dest_val, Disr::from(disr), i); - let arg = &fcx.fn_ty.args[arg_idx]; + let lldestptr = adt::trans_field_ptr(&bcx, sig.output(), dest_val, Disr::from(disr), i); + let arg = &fn_ty.args[arg_idx]; arg_idx += 1; - let b = &bcx.build(); - if common::type_is_fat_ptr(bcx.tcx(), arg_ty) { - let meta = &fcx.fn_ty.args[arg_idx]; + if common::type_is_fat_ptr(bcx.ccx, arg_ty) { + let meta = &fn_ty.args[arg_idx]; arg_idx += 1; - arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr)); - meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr)); + arg.store_fn_arg(&bcx, &mut llarg_idx, get_dataptr(&bcx, lldestptr)); + meta.store_fn_arg(&bcx, &mut llarg_idx, get_meta(&bcx, lldestptr)); } else { - arg.store_fn_arg(b, &mut llarg_idx, lldestptr); + arg.store_fn_arg(&bcx, &mut llarg_idx, lldestptr); } } - adt::trans_set_discr(bcx, sig.output(), dest, disr); - } + adt::trans_set_discr(&bcx, sig.output(), dest, disr); - fcx.finish(bcx, DebugLoc::None); + if fn_ty.ret.is_indirect() { + bcx.ret_void(); + return; + } + + if let Some(cast_ty) = fn_ty.ret.cast { + let load = bcx.load(bcx.pointercast(dest, cast_ty.ptr_to())); + let llalign = llalign_of_min(ccx, fn_ty.ret.ty); + unsafe { + llvm::LLVMSetAlignment(load, llalign); + } + bcx.ret(load) + } else { + bcx.ret(bcx.load(dest)) + } + } else { + bcx.ret_void(); + } } 
pub fn llvm_linkage_by_name(name: &str) -> Option { @@ -1168,9 +737,7 @@ pub fn maybe_create_entry_wrapper(ccx: &CrateContext) { let et = ccx.sess().entry_type.get().unwrap(); match et { - config::EntryMain => { - create_entry_fn(ccx, span, main_llfn, true); - } + config::EntryMain => create_entry_fn(ccx, span, main_llfn, true), config::EntryStart => create_entry_fn(ccx, span, main_llfn, false), config::EntryNone => {} // Do nothing. } @@ -1195,47 +762,27 @@ fn create_entry_fn(ccx: &CrateContext, attributes::set_frame_pointer_elimination(ccx, llfn); let llbb = unsafe { - llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _) + let name = CString::new("top").unwrap(); + llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, name.as_ptr()) }; - let bld = ccx.raw_builder(); - unsafe { - llvm::LLVMPositionBuilderAtEnd(bld, llbb); - - debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx); - - let (start_fn, args) = if use_start_lang_item { - let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) { - Ok(id) => id, - Err(s) => ccx.sess().fatal(&s) - }; - let empty_substs = ccx.tcx().intern_substs(&[]); - let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx); - let args = { - let opaque_rust_main = - llvm::LLVMBuildPointerCast(bld, - rust_main, - Type::i8p(ccx).to_ref(), - "rust_main\0".as_ptr() as *const _); - - vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)] - }; - (start_fn, args) - } else { - debug!("using user-defined start fn"); - let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)]; + let bld = Builder::with_ccx(ccx); + bld.position_at_end(llbb); - (rust_main, args) - }; + debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx, &bld); - let result = llvm::LLVMRustBuildCall(bld, - start_fn, - args.as_ptr(), - args.len() as c_uint, - ptr::null_mut(), - noname()); + let (start_fn, args) = if use_start_lang_item { + let start_def_id = ccx.tcx().require_lang_item(StartFnLangItem); + let empty_substs = ccx.tcx().intern_substs(&[]); + let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx); + (start_fn, vec![bld.pointercast(rust_main, Type::i8p(ccx).ptr_to()), get_param(llfn, 0), + get_param(llfn, 1)]) + } else { + debug!("using user-defined start fn"); + (rust_main, vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)]) + }; - llvm::LLVMBuildRet(bld, result); - } + let result = bld.call(start_fn, &args, None); + bld.ret(result); } } diff --git a/src/librustc_trans/basic_block.rs b/src/librustc_trans/basic_block.rs deleted file mode 100644 index 60bd3fb8ef1b8db10ed2417a2a6db484d5e8e73a..0000000000000000000000000000000000000000 --- a/src/librustc_trans/basic_block.rs +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use llvm; -use llvm::BasicBlockRef; -use value::{Users, Value}; -use std::iter::{Filter, Map}; - -#[derive(Copy, Clone)] -pub struct BasicBlock(pub BasicBlockRef); - -pub type Preds = Map bool>, fn(Value) -> BasicBlock>; - -/// Wrapper for LLVM BasicBlockRef -impl BasicBlock { - pub fn get(&self) -> BasicBlockRef { - let BasicBlock(v) = *self; v - } - - pub fn as_value(self) -> Value { - unsafe { - Value(llvm::LLVMBasicBlockAsValue(self.get())) - } - } - - pub fn pred_iter(self) -> Preds { - fn is_a_terminator_inst(user: &Value) -> bool { user.is_a_terminator_inst() } - let is_a_terminator_inst: fn(&Value) -> bool = is_a_terminator_inst; - - fn get_parent(user: Value) -> BasicBlock { user.get_parent().unwrap() } - let get_parent: fn(Value) -> BasicBlock = get_parent; - - self.as_value().user_iter() - .filter(is_a_terminator_inst) - .map(get_parent) - } - - pub fn get_single_predecessor(self) -> Option { - let mut iter = self.pred_iter(); - match (iter.next(), iter.next()) { - (Some(first), None) => Some(first), - _ => None - } - } - - pub fn delete(self) { - unsafe { - llvm::LLVMDeleteBasicBlock(self.0); - } - } -} diff --git a/src/librustc_trans/build.rs b/src/librustc_trans/build.rs deleted file mode 100644 index 8cd47bd148d0cf597e5241a65bc6b0b511a7635f..0000000000000000000000000000000000000000 --- a/src/librustc_trans/build.rs +++ /dev/null @@ -1,1167 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(dead_code)] // FFI wrappers -#![allow(non_snake_case)] - -use llvm; -use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{Opcode, IntPredicate, RealPredicate}; -use llvm::{ValueRef, BasicBlockRef}; -use common::*; -use syntax_pos::Span; - -use builder::Builder; -use type_::Type; -use value::Value; -use debuginfo::DebugLoc; - -use libc::{c_uint, c_char}; - -pub fn terminate(cx: Block, _: &str) { - debug!("terminate({})", cx.to_str()); - cx.terminated.set(true); -} - -pub fn check_not_terminated(cx: Block) { - if cx.terminated.get() { - bug!("already terminated!"); - } -} - -pub fn B<'blk, 'tcx>(cx: Block<'blk, 'tcx>) -> Builder<'blk, 'tcx> { - let b = cx.fcx.ccx.builder(); - b.position_at_end(cx.llbb); - b -} - -// The difference between a block being unreachable and being terminated is -// somewhat obscure, and has to do with error checking. When a block is -// terminated, we're saying that trying to add any further statements in the -// block is an error. On the other hand, if something is unreachable, that -// means that the block was terminated in some way that we don't want to check -// for (panic/break/return statements, call to diverging functions, etc), and -// further instructions to the block should simply be ignored. 
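(Illustrative recap, not part of the patch.) The deletion that follows removes the whole layer of capitalized wrapper functions in build.rs; each one had the same shape, guarding on the terminated/unreachable state described in the comment above before delegating to the Builder. With that per-block state tracking gone, the rest of this patch calls the builder methods on the block directly. The Add wrapper is quoted from the deleted file as a representative example:

// Old style: a free function that refuses to emit into a diverged block and
// hands back an undef of a matching type instead.
pub fn Add(cx: Block, lhs: ValueRef, rhs: ValueRef, debug_loc: DebugLoc) -> ValueRef {
    if cx.unreachable.get() {
        return _Undef(lhs);
    }
    debug_loc.apply(cx.fcx);
    B(cx).add(lhs, rhs)
}

// New style used elsewhere in this patch: call the builder method directly,
//     let sum = bcx.add(lhs, rhs);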
- -pub fn RetVoid(cx: Block, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "RetVoid"); - debug_loc.apply(cx.fcx); - B(cx).ret_void(); -} - -pub fn Ret(cx: Block, v: ValueRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "Ret"); - debug_loc.apply(cx.fcx); - B(cx).ret(v); -} - -pub fn AggregateRet(cx: Block, - ret_vals: &[ValueRef], - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "AggregateRet"); - debug_loc.apply(cx.fcx); - B(cx).aggregate_ret(ret_vals); -} - -pub fn Br(cx: Block, dest: BasicBlockRef, debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "Br"); - debug_loc.apply(cx.fcx); - B(cx).br(dest); -} - -pub fn CondBr(cx: Block, - if_: ValueRef, - then: BasicBlockRef, - else_: BasicBlockRef, - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "CondBr"); - debug_loc.apply(cx.fcx); - B(cx).cond_br(if_, then, else_); -} - -pub fn Switch(cx: Block, v: ValueRef, else_: BasicBlockRef, num_cases: usize) - -> ValueRef { - if cx.unreachable.get() { return _Undef(v); } - check_not_terminated(cx); - terminate(cx, "Switch"); - B(cx).switch(v, else_, num_cases) -} - -pub fn AddCase(s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { - unsafe { - if llvm::LLVMIsUndef(s) == llvm::True { return; } - llvm::LLVMAddCase(s, on_val, dest); - } -} - -pub fn IndirectBr(cx: Block, - addr: ValueRef, - num_dests: usize, - debug_loc: DebugLoc) { - if cx.unreachable.get() { - return; - } - check_not_terminated(cx); - terminate(cx, "IndirectBr"); - debug_loc.apply(cx.fcx); - B(cx).indirect_br(addr, num_dests); -} - -pub fn Invoke(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - then: BasicBlockRef, - catch: BasicBlockRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return C_null(Type::i8(cx.ccx())); - } - check_not_terminated(cx); - terminate(cx, "Invoke"); - debug!("Invoke({:?} with arguments ({}))", - Value(fn_), - args.iter().map(|a| { - format!("{:?}", Value(*a)) - }).collect::>().join(", ")); - debug_loc.apply(cx.fcx); - let bundle = cx.lpad().and_then(|b| b.bundle()); - B(cx).invoke(fn_, args, then, catch, bundle) -} - -pub fn Unreachable(cx: Block) { - if cx.unreachable.get() { - return - } - cx.unreachable.set(true); - if !cx.terminated.get() { - B(cx).unreachable(); - } -} - -pub fn _Undef(val: ValueRef) -> ValueRef { - unsafe { - return llvm::LLVMGetUndef(val_ty(val).to_ref()); - } -} - -/* Arithmetic */ -pub fn Add(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).add(lhs, rhs) -} - -pub fn NSWAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nswadd(lhs, rhs) -} - -pub fn NUWAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nuwadd(lhs, rhs) -} - -pub fn FAdd(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fadd(lhs, rhs) -} - -pub fn FAddFast(cx: Block, - lhs: ValueRef, - rhs: 
ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fadd_fast(lhs, rhs) -} - -pub fn Sub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).sub(lhs, rhs) -} - -pub fn NSWSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nswsub(lhs, rhs) -} - -pub fn NUWSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nuwsub(lhs, rhs) -} - -pub fn FSub(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fsub(lhs, rhs) -} - -pub fn FSubFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fsub_fast(lhs, rhs) -} - -pub fn Mul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).mul(lhs, rhs) -} - -pub fn NSWMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nswmul(lhs, rhs) -} - -pub fn NUWMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).nuwmul(lhs, rhs) -} - -pub fn FMul(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fmul(lhs, rhs) -} - -pub fn FMulFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fmul_fast(lhs, rhs) -} - -pub fn UDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).udiv(lhs, rhs) -} - -pub fn SDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).sdiv(lhs, rhs) -} - -pub fn ExactSDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).exactsdiv(lhs, rhs) -} - -pub fn FDiv(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fdiv(lhs, rhs) -} - -pub fn FDivFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).fdiv_fast(lhs, rhs) -} - -pub fn URem(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).urem(lhs, rhs) -} - -pub fn SRem(cx: Block, - lhs: ValueRef, - 
rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).srem(lhs, rhs) -} - -pub fn FRem(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).frem(lhs, rhs) -} - -pub fn FRemFast(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).frem_fast(lhs, rhs) -} - -pub fn Shl(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).shl(lhs, rhs) -} - -pub fn LShr(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).lshr(lhs, rhs) -} - -pub fn AShr(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).ashr(lhs, rhs) -} - -pub fn And(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).and(lhs, rhs) -} - -pub fn Or(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).or(lhs, rhs) -} - -pub fn Xor(cx: Block, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).xor(lhs, rhs) -} - -pub fn BinOp(cx: Block, - op: Opcode, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _Undef(lhs); - } - debug_loc.apply(cx.fcx); - B(cx).binop(op, lhs, rhs) -} - -pub fn Neg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).neg(v) -} - -pub fn NSWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).nswneg(v) -} - -pub fn NUWNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).nuwneg(v) -} -pub fn FNeg(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).fneg(v) -} - -pub fn Not(cx: Block, v: ValueRef, debug_loc: DebugLoc) -> ValueRef { - if cx.unreachable.get() { - return _Undef(v); - } - debug_loc.apply(cx.fcx); - B(cx).not(v) -} - -pub fn Alloca(cx: Block, ty: Type, name: &str) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.ptr_to().to_ref()); } - AllocaFcx(cx.fcx, ty, name) - } -} - -pub fn AllocaFcx(fcx: &FunctionContext, ty: Type, name: &str) -> ValueRef { - let b = fcx.ccx.builder(); - b.position_before(fcx.alloca_insert_pt.get().unwrap()); - DebugLoc::None.apply(fcx); - b.alloca(ty, name) -} - -pub fn Free(cx: Block, pointer_val: ValueRef) { - if cx.unreachable.get() { return; } - B(cx).free(pointer_val) -} - -pub fn Load(cx: Block, pointer_val: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { - 
let ty = val_ty(pointer_val); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - return llvm::LLVMGetUndef(eltty.to_ref()); - } - B(cx).load(pointer_val) - } -} - -pub fn VolatileLoad(cx: Block, pointer_val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).volatile_load(pointer_val) - } -} - -pub fn AtomicLoad(cx: Block, pointer_val: ValueRef, order: AtomicOrdering) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { - return llvm::LLVMGetUndef(ccx.int_type().to_ref()); - } - B(cx).atomic_load(pointer_val, order) - } -} - - -pub fn LoadRangeAssert(cx: Block, pointer_val: ValueRef, lo: u64, - hi: u64, signed: llvm::Bool) -> ValueRef { - if cx.unreachable.get() { - let ccx = cx.fcx.ccx; - let ty = val_ty(pointer_val); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - unsafe { - llvm::LLVMGetUndef(eltty.to_ref()) - } - } else { - B(cx).load_range_assert(pointer_val, lo, hi, signed) - } -} - -pub fn LoadNonNull(cx: Block, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { - let ccx = cx.fcx.ccx; - let ty = val_ty(ptr); - let eltty = if ty.kind() == llvm::Array { - ty.element_type() - } else { - ccx.int_type() - }; - unsafe { - llvm::LLVMGetUndef(eltty.to_ref()) - } - } else { - B(cx).load_nonnull(ptr) - } -} - -pub fn Store(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } - B(cx).store(val, ptr) -} - -pub fn VolatileStore(cx: Block, val: ValueRef, ptr: ValueRef) -> ValueRef { - if cx.unreachable.get() { return C_nil(cx.ccx()); } - B(cx).volatile_store(val, ptr) -} - -pub fn AtomicStore(cx: Block, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { - if cx.unreachable.get() { return; } - B(cx).atomic_store(val, ptr, order) -} - -pub fn GEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).gep(pointer, indices) - } -} - -// Simple wrapper around GEP that takes an array of ints and wraps them -// in C_i32() -#[inline] -pub fn GEPi(cx: Block, base: ValueRef, ixs: &[usize]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).gepi(base, ixs) - } -} - -pub fn InBoundsGEP(cx: Block, pointer: ValueRef, indices: &[ValueRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).inbounds_gep(pointer, indices) - } -} - -pub fn StructGEP(cx: Block, pointer: ValueRef, idx: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).ptr_to().to_ref()); - } - B(cx).struct_gep(pointer, idx) - } -} - -pub fn GlobalString(cx: Block, _str: *const c_char) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); - } - B(cx).global_string(_str) - } -} - -pub fn GlobalStringPtr(cx: Block, _str: *const c_char) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i8p(cx.ccx()).to_ref()); - } - B(cx).global_string_ptr(_str) - } -} - -/* Casts */ -pub fn Trunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).trunc(val, dest_ty) 
- } -} - -pub fn ZExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).zext(val, dest_ty) - } -} - -pub fn SExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sext(val, dest_ty) - } -} - -pub fn FPToUI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptoui(val, dest_ty) - } -} - -pub fn FPToSI(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptosi(val, dest_ty) - } -} - -pub fn UIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).uitofp(val, dest_ty) - } -} - -pub fn SIToFP(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sitofp(val, dest_ty) - } -} - -pub fn FPTrunc(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fptrunc(val, dest_ty) - } -} - -pub fn FPExt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).fpext(val, dest_ty) - } -} - -pub fn PtrToInt(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).ptrtoint(val, dest_ty) - } -} - -pub fn IntToPtr(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).inttoptr(val, dest_ty) - } -} - -pub fn BitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).bitcast(val, dest_ty) - } -} - -pub fn ZExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).zext_or_bitcast(val, dest_ty) - } -} - -pub fn SExtOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).sext_or_bitcast(val, dest_ty) - } -} - -pub fn TruncOrBitCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).trunc_or_bitcast(val, dest_ty) - } -} - -pub fn Cast(cx: Block, op: Opcode, val: ValueRef, dest_ty: Type, - _: *const u8) - -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).cast(op, val, dest_ty) - } -} - -pub fn PointerCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).pointercast(val, dest_ty) - } -} - -pub fn IntCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); } - B(cx).intcast(val, dest_ty) - } -} - -pub fn FPCast(cx: Block, val: ValueRef, dest_ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(dest_ty.to_ref()); 
} - B(cx).fpcast(val, dest_ty) - } -} - - -/* Comparisons */ -pub fn ICmp(cx: Block, - op: IntPredicate, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - debug_loc.apply(cx.fcx); - B(cx).icmp(op, lhs, rhs) - } -} - -pub fn FCmp(cx: Block, - op: RealPredicate, - lhs: ValueRef, - rhs: ValueRef, - debug_loc: DebugLoc) - -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - debug_loc.apply(cx.fcx); - B(cx).fcmp(op, lhs, rhs) - } -} - -/* Miscellaneous instructions */ -pub fn EmptyPhi(cx: Block, ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).empty_phi(ty) - } -} - -pub fn Phi(cx: Block, ty: Type, vals: &[ValueRef], - bbs: &[BasicBlockRef]) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).phi(ty, vals, bbs) - } -} - -pub fn AddIncomingToPhi(phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { - unsafe { - if llvm::LLVMIsUndef(phi) == llvm::True { return; } - llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); - } -} - -pub fn _UndefReturn(cx: Block, fn_: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - let ty = val_ty(fn_); - let retty = if ty.kind() == llvm::Function { - ty.return_type() - } else { - ccx.int_type() - }; - B(cx).count_insn("ret_undef"); - llvm::LLVMGetUndef(retty.to_ref()) - } -} - -pub fn add_span_comment(cx: Block, sp: Span, text: &str) { - B(cx).add_span_comment(sp, text) -} - -pub fn add_comment(cx: Block, text: &str) { - B(cx).add_comment(text) -} - -pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char, - inputs: &[ValueRef], output: Type, - volatile: bool, alignstack: bool, - dia: AsmDialect) -> ValueRef { - B(cx).inline_asm_call(asm, cons, inputs, output, volatile, alignstack, dia) -} - -pub fn Call(cx: Block, - fn_: ValueRef, - args: &[ValueRef], - debug_loc: DebugLoc) - -> ValueRef { - if cx.unreachable.get() { - return _UndefReturn(cx, fn_); - } - debug_loc.apply(cx.fcx); - let bundle = cx.lpad.get().and_then(|b| b.bundle()); - B(cx).call(fn_, args, bundle) -} - -pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) { - if cx.unreachable.get() { return; } - B(cx).atomic_fence(order, scope) -} - -pub fn Select(cx: Block, if_: ValueRef, then: ValueRef, else_: ValueRef) -> ValueRef { - if cx.unreachable.get() { return _Undef(then); } - B(cx).select(if_, then, else_) -} - -pub fn VAArg(cx: Block, list: ValueRef, ty: Type) -> ValueRef { - unsafe { - if cx.unreachable.get() { return llvm::LLVMGetUndef(ty.to_ref()); } - B(cx).va_arg(list, ty) - } -} - -pub fn ExtractElement(cx: Block, vec_val: ValueRef, index: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).extract_element(vec_val, index) - } -} - -pub fn InsertElement(cx: Block, vec_val: ValueRef, elt_val: ValueRef, - index: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).insert_element(vec_val, elt_val, index) - } -} - -pub fn ShuffleVector(cx: Block, v1: ValueRef, v2: ValueRef, - mask: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).shuffle_vector(v1, v2, mask) - } -} - -pub fn VectorSplat(cx: Block, num_elts: 
usize, elt_val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).vector_splat(num_elts, elt_val) - } -} - -pub fn ExtractValue(cx: Block, agg_val: ValueRef, index: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).extract_value(agg_val, index) - } -} - -pub fn InsertValue(cx: Block, agg_val: ValueRef, elt_val: ValueRef, index: usize) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::nil(cx.ccx()).to_ref()); - } - B(cx).insert_value(agg_val, elt_val, index) - } -} - -pub fn IsNull(cx: Block, val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - B(cx).is_null(val) - } -} - -pub fn IsNotNull(cx: Block, val: ValueRef) -> ValueRef { - unsafe { - if cx.unreachable.get() { - return llvm::LLVMGetUndef(Type::i1(cx.ccx()).to_ref()); - } - B(cx).is_not_null(val) - } -} - -pub fn PtrDiff(cx: Block, lhs: ValueRef, rhs: ValueRef) -> ValueRef { - unsafe { - let ccx = cx.fcx.ccx; - if cx.unreachable.get() { return llvm::LLVMGetUndef(ccx.int_type().to_ref()); } - B(cx).ptrdiff(lhs, rhs) - } -} - -pub fn Trap(cx: Block) { - if cx.unreachable.get() { return; } - B(cx).trap(); -} - -pub fn LandingPad(cx: Block, ty: Type, pers_fn: ValueRef, - num_clauses: usize) -> ValueRef { - check_not_terminated(cx); - assert!(!cx.unreachable.get()); - B(cx).landing_pad(ty, pers_fn, num_clauses, cx.fcx.llfn) -} - -pub fn AddClause(cx: Block, landing_pad: ValueRef, clause: ValueRef) { - B(cx).add_clause(landing_pad, clause) -} - -pub fn SetCleanup(cx: Block, landing_pad: ValueRef) { - B(cx).set_cleanup(landing_pad) -} - -pub fn SetPersonalityFn(cx: Block, f: ValueRef) { - B(cx).set_personality_fn(f) -} - -pub fn Resume(cx: Block, exn: ValueRef) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "Resume"); - B(cx).resume(exn) -} - -// Atomic Operations -pub fn AtomicCmpXchg(cx: Block, dst: ValueRef, - cmp: ValueRef, src: ValueRef, - order: AtomicOrdering, - failure_order: AtomicOrdering, - weak: llvm::Bool) -> ValueRef { - B(cx).atomic_cmpxchg(dst, cmp, src, order, failure_order, weak) -} -pub fn AtomicRMW(cx: Block, op: AtomicRmwBinOp, - dst: ValueRef, src: ValueRef, - order: AtomicOrdering) -> ValueRef { - B(cx).atomic_rmw(op, dst, src, order) -} - -pub fn CleanupPad(cx: Block, - parent: Option, - args: &[ValueRef]) -> ValueRef { - check_not_terminated(cx); - assert!(!cx.unreachable.get()); - B(cx).cleanup_pad(parent, args) -} - -pub fn CleanupRet(cx: Block, - cleanup: ValueRef, - unwind: Option) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "CleanupRet"); - B(cx).cleanup_ret(cleanup, unwind) -} - -pub fn CatchPad(cx: Block, - parent: ValueRef, - args: &[ValueRef]) -> ValueRef { - check_not_terminated(cx); - assert!(!cx.unreachable.get()); - B(cx).catch_pad(parent, args) -} - -pub fn CatchRet(cx: Block, pad: ValueRef, unwind: BasicBlockRef) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "CatchRet"); - B(cx).catch_ret(pad, unwind) -} - -pub fn CatchSwitch(cx: Block, - parent: Option, - unwind: Option, - num_handlers: usize) -> ValueRef { - check_not_terminated(cx); - terminate(cx, "CatchSwitch"); - B(cx).catch_switch(parent, unwind, num_handlers) -} - -pub fn AddHandler(cx: Block, catch_switch: ValueRef, handler: BasicBlockRef) { - B(cx).add_handler(catch_switch, handler) -} diff --git a/src/librustc_trans/builder.rs 
b/src/librustc_trans/builder.rs index 0480bb82a998e902396d78f98b282d1f0ec00875..136d1aad31a03965077995779028848e8b3ae033 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -14,12 +14,10 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; -use base; use common::*; use machine::llalign_of_pref; use type_::Type; use value::Value; -use util::nodemap::FxHashMap; use libc::{c_uint, c_char}; use std::borrow::Cow; @@ -32,65 +30,40 @@ pub struct Builder<'a, 'tcx: 'a> { pub ccx: &'a CrateContext<'a, 'tcx>, } +impl<'a, 'tcx> Drop for Builder<'a, 'tcx> { + fn drop(&mut self) { + unsafe { + llvm::LLVMDisposeBuilder(self.llbuilder); + } + } +} + // This is a really awful way to get a zero-length c-string, but better (and a // lot more efficient) than doing str::as_c_str("", ...) every time. -pub fn noname() -> *const c_char { +fn noname() -> *const c_char { static CNULL: c_char = 0; &CNULL } impl<'a, 'tcx> Builder<'a, 'tcx> { - pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> { + pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self { + // Create a fresh builder from the crate context. + let llbuilder = unsafe { + llvm::LLVMCreateBuilderInContext(ccx.llcx()) + }; Builder { - llbuilder: ccx.raw_builder(), + llbuilder: llbuilder, ccx: ccx, } } - pub fn count_insn(&self, category: &str) { + fn count_insn(&self, category: &str) { if self.ccx.sess().trans_stats() { - self.ccx.stats().n_llvm_insns.set(self.ccx - .stats() - .n_llvm_insns - .get() + 1); + self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1); } - self.ccx.count_llvm_insn(); if self.ccx.sess().count_llvm_insns() { - base::with_insn_ctxt(|v| { - let mut h = self.ccx.stats().llvm_insns.borrow_mut(); - - // Build version of path with cycles removed. - - // Pass 1: scan table mapping str -> rightmost pos. - let mut mm = FxHashMap(); - let len = v.len(); - let mut i = 0; - while i < len { - mm.insert(v[i], i); - i += 1; - } - - // Pass 2: concat strings for each elt, skipping - // forwards over any cycles by advancing to rightmost - // occurrence of each element in path. - let mut s = String::from("."); - i = 0; - while i < len { - i = mm[v[i]]; - s.push('/'); - s.push_str(v[i]); - i += 1; - } - - s.push('/'); - s.push_str(category); - - let n = match h.get(&s) { - Some(&n) => n, - _ => 0 - }; - h.insert(s, n+1); - }) + let mut h = self.ccx.stats().llvm_insns.borrow_mut(); + *h.entry(category.to_string()).or_insert(0) += 1; } } @@ -462,7 +435,7 @@ pub fn not(&self, v: ValueRef) -> ValueRef { } } - pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { + pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef { self.count_insn("alloca"); unsafe { if name.is_empty() { @@ -1103,6 +1076,20 @@ pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { } } + pub fn add_case(&self, s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { + unsafe { + if llvm::LLVMIsUndef(s) == llvm::True { return; } + llvm::LLVMAddCase(s, on_val, dest) + } + } + + pub fn add_incoming_to_phi(&self, phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { + unsafe { + if llvm::LLVMIsUndef(phi) == llvm::True { return; } + llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + } + } + /// Returns the ptr value that should be used for storing `val`. 
fn check_store<'b>(&self, val: ValueRef, diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 93d43f7d96116c5095fddd65f18e11d6ba92047d..85b26074bae6d4358758eaea18d7bd0f3a5ba427 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_upper_case_globals)] - use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector}; use abi::{self, align_up_to, FnType, ArgType}; use context::CrateContext; diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index d7e9f1372e06d1af5a206f7243c1a5c84cc98345..ac832b6f746fd7262a037623af2e99e97800da46 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -16,7 +16,6 @@ pub use self::CalleeData::*; -use arena::TypedArena; use llvm::{self, ValueRef, get_params}; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; @@ -25,10 +24,10 @@ use attributes; use base; use base::*; -use build::*; -use common::{self, Block, Result, CrateContext, FunctionContext, SharedCrateContext}; +use common::{ + self, CrateContext, FunctionContext, SharedCrateContext +}; use consts; -use debuginfo::DebugLoc; use declare; use value::Value; use meth; @@ -71,25 +70,8 @@ pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> { } } - /// Trait or impl method call. - pub fn method_call<'blk>(bcx: Block<'blk, 'tcx>, - method_call: ty::MethodCall) - -> Callee<'tcx> { - let method = bcx.tcx().tables().method_map[&method_call]; - Callee::method(bcx, method) - } - - /// Trait or impl method. - pub fn method<'blk>(bcx: Block<'blk, 'tcx>, - method: ty::MethodCallee<'tcx>) -> Callee<'tcx> { - let substs = bcx.fcx.monomorphize(&method.substs); - Callee::def(bcx.ccx(), method.def_id, substs) - } - /// Function or method definition. - pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>) + pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Callee<'tcx> { let tcx = ccx.tcx(); @@ -196,25 +178,6 @@ pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, fn_ty } - /// This behemoth of a function translates function calls. Unfortunately, in - /// order to generate more efficient LLVM output at -O0, it has quite a complex - /// signature (refactoring this into two functions seems like a good idea). - /// - /// In particular, for lang items, it is invoked with a dest of None, and in - /// that case the return value contains the result of the fn. The lang item must - /// not return a structural type or else all heck breaks loose. - /// - /// For non-lang items, `dest` is always Some, and hence the result is written - /// into memory somewhere. Nonetheless we return the actual return value of the - /// function. - pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc, - args: &[ValueRef], - dest: Option) - -> Result<'blk, 'tcx> { - trans_call_inner(bcx, debug_loc, self, args, dest) - } - /// Turn the callee into a function pointer. 
pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { match self.data { @@ -267,8 +230,6 @@ fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, // then adapt the self type let llfn_closure_kind = ccx.tcx().closure_kind(def_id); - let _icx = push_ctxt("trans_closure_adapter_shim"); - debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \ trait_closure_kind={:?}, llfn={:?})", llfn_closure_kind, trait_closure_kind, Value(llfn)); @@ -367,23 +328,28 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty); attributes::set_frame_pointer_elimination(ccx, lloncefn); - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false); + let orig_fn_ty = fn_ty; + let fcx = FunctionContext::new(ccx, lloncefn); + let mut bcx = fcx.get_entry_block(); + let callee = Callee { + data: Fn(llreffn), + ty: llref_fn_ty + }; // the first argument (`self`) will be the (by value) closure env. let mut llargs = get_params(fcx.llfn); - let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize; - let env_arg = &fcx.fn_ty.args[0]; + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); + let self_idx = fn_ty.ret.is_indirect() as usize; + let env_arg = &orig_fn_ty.args[0]; let llenv = if env_arg.is_indirect() { llargs[self_idx] } else { - let scratch = alloc_ty(bcx, closure_ty, "self"); + let scratch = alloc_ty(&bcx, closure_ty, "self"); let mut llarg_idx = self_idx; - env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch); + env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch); scratch }; @@ -391,33 +357,37 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>( // Adjust llargs such that llargs[self_idx..] has the call arguments. // For zero-sized closures that means sneaking in a new argument. if env_arg.is_ignore() { - if self_idx > 0 { - self_idx -= 1; - llargs[self_idx] = llenv; - } else { - llargs.insert(0, llenv); - } + llargs.insert(self_idx, llenv); } else { llargs[self_idx] = llenv; } - let dest = fcx.llretslotptr.get(); - - let callee = Callee { - data: Fn(llreffn), - ty: llref_fn_ty - }; - // Call the by-ref closure body with `self` in a cleanup scope, // to drop `self` when the body returns, or in case it unwinds. - let self_scope = fcx.push_custom_cleanup_scope(); - fcx.schedule_drop_mem(self_scope, llenv, closure_ty); - - bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx; + let self_scope = fcx.schedule_drop_mem(llenv, closure_ty); + + let llfn = callee.reify(bcx.ccx); + let llret; + if let Some(landing_pad) = self_scope.landing_pad { + let normal_bcx = bcx.fcx().build_new_block("normal-return"); + llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None); + bcx = normal_bcx; + } else { + llret = bcx.call(llfn, &llargs[..], None); + } + fn_ty.apply_attrs_callsite(llret); - fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); + if fn_ret.0.is_never() { + bcx.unreachable(); + } else { + self_scope.trans(&bcx); - fcx.finish(bcx, DebugLoc::None); + if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } + } ccx.instances().borrow_mut().insert(method_instance, lloncefn); @@ -443,7 +413,6 @@ fn trans_fn_pointer_shim<'a, 'tcx>( bare_fn_ty: Ty<'tcx>) -> ValueRef { - let _icx = push_ctxt("trans_fn_pointer_shim"); let tcx = ccx.tcx(); // Normalize the type for better caching. 
@@ -519,32 +488,39 @@ fn trans_fn_pointer_shim<'a, 'tcx>( let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty); attributes::set_frame_pointer_elimination(ccx, llfn); // - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false); + let fcx = FunctionContext::new(ccx, llfn); + let bcx = fcx.get_entry_block(); - let llargs = get_params(fcx.llfn); + let mut llargs = get_params(fcx.llfn); - let self_idx = fcx.fn_ty.ret.is_indirect() as usize; + let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize); let llfnpointer = llfnpointer.unwrap_or_else(|| { // the first argument (`self`) will be ptr to the fn pointer if is_by_ref { - Load(bcx, llargs[self_idx]) + bcx.load(self_arg) } else { - llargs[self_idx] + self_arg } }); - let dest = fcx.llretslotptr.get(); - let callee = Callee { data: Fn(llfnpointer), ty: bare_fn_ty }; - bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx; + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(ccx, &[]); + let llret = bcx.call(llfnpointer, &llargs, None); + fn_ty.apply_attrs_callsite(llret); - fcx.finish(bcx, DebugLoc::None); + if fn_ret.0.is_never() { + bcx.unreachable(); + } else { + if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } + } ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn); @@ -649,87 +625,3 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, (llfn, fn_ty) } - -// ______________________________________________________________________ -// Translating calls - -fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc, - callee: Callee<'tcx>, - args: &[ValueRef], - opt_llretslot: Option) - -> Result<'blk, 'tcx> { - // Introduce a temporary cleanup scope that will contain cleanups - // for the arguments while they are being evaluated. The purpose - // this cleanup is to ensure that, should a panic occur while - // evaluating argument N, the values for arguments 0...N-1 are all - // cleaned up. If no panic occurs, the values are handed off to - // the callee, and hence none of the cleanups in this temporary - // scope will ever execute. - let fcx = bcx.fcx; - let ccx = fcx.ccx; - - let fn_ret = callee.ty.fn_ret(); - let fn_ty = callee.direct_fn_type(ccx, &[]); - - let mut callee = match callee.data { - NamedTupleConstructor(_) | Intrinsic => { - bug!("{:?} calls should not go through Callee::call", callee); - } - f => f - }; - - // If there no destination, return must be direct, with no cast. 
- if opt_llretslot.is_none() { - assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none()); - } - - let mut llargs = Vec::new(); - - if fn_ty.ret.is_indirect() { - let mut llretslot = opt_llretslot.unwrap(); - if let Some(ty) = fn_ty.ret.cast { - llretslot = PointerCast(bcx, llretslot, ty.ptr_to()); - } - llargs.push(llretslot); - } - - match callee { - Virtual(idx) => { - llargs.push(args[0]); - - let fn_ptr = meth::get_virtual_method(bcx, args[1], idx); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); - callee = Fn(PointerCast(bcx, fn_ptr, llty)); - llargs.extend_from_slice(&args[2..]); - } - _ => llargs.extend_from_slice(args) - } - - let llfn = match callee { - Fn(f) => f, - _ => bug!("expected fn pointer callee, found {:?}", callee) - }; - - let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc); - if !bcx.unreachable.get() { - fn_ty.apply_attrs_callsite(llret); - - // If the function we just called does not use an outpointer, - // store the result into the rust outpointer. Cast the outpointer - // type to match because some ABIs will use a different type than - // the Rust type. e.g., a {u32,u32} struct could be returned as - // u64. - if !fn_ty.ret.is_indirect() { - if let Some(llretslot) = opt_llretslot { - fn_ty.ret.store(&bcx.build(), llret, llretslot); - } - } - } - - if fn_ret.0.is_never() { - Unreachable(bcx); - } - - Result::new(bcx, llret) -} diff --git a/src/librustc_trans/cleanup.rs b/src/librustc_trans/cleanup.rs index b9f24eba9dc1e31b120d759d0102563c2b063be1..add820748acfcd5325107a8292a73e5d9b3671d2 100644 --- a/src/librustc_trans/cleanup.rs +++ b/src/librustc_trans/cleanup.rs @@ -11,216 +11,100 @@ //! ## The Cleanup module //! //! The cleanup module tracks what values need to be cleaned up as scopes -//! are exited, either via panic or just normal control flow. The basic -//! idea is that the function context maintains a stack of cleanup scopes -//! that are pushed/popped as we traverse the AST tree. There is typically -//! at least one cleanup scope per AST node; some AST nodes may introduce -//! additional temporary scopes. +//! are exited, either via panic or just normal control flow. //! //! Cleanup items can be scheduled into any of the scopes on the stack. -//! Typically, when a scope is popped, we will also generate the code for -//! each of its cleanups at that time. This corresponds to a normal exit -//! from a block (for example, an expression completing evaluation -//! successfully without panic). However, it is also possible to pop a -//! block *without* executing its cleanups; this is typically used to -//! guard intermediate values that must be cleaned up on panic, but not -//! if everything goes right. See the section on custom scopes below for -//! more details. -//! -//! Cleanup scopes come in three kinds: -//! -//! - **AST scopes:** each AST node in a function body has a corresponding -//! AST scope. We push the AST scope when we start generate code for an AST -//! node and pop it once the AST node has been fully generated. -//! - **Loop scopes:** loops have an additional cleanup scope. Cleanups are -//! never scheduled into loop scopes; instead, they are used to record the -//! basic blocks that we should branch to when a `continue` or `break` statement -//! is encountered. -//! - **Custom scopes:** custom scopes are typically used to ensure cleanup -//! of intermediate values. -//! -//! ### When to schedule cleanup -//! -//! Although the cleanup system is intended to *feel* fairly declarative, -//! 
it's still important to time calls to `schedule_clean()` correctly. -//! Basically, you should not schedule cleanup for memory until it has -//! been initialized, because if an unwind should occur before the memory -//! is fully initialized, then the cleanup will run and try to free or -//! drop uninitialized memory. If the initialization itself produces -//! byproducts that need to be freed, then you should use temporary custom -//! scopes to ensure that those byproducts will get freed on unwind. For -//! example, an expression like `box foo()` will first allocate a box in the -//! heap and then call `foo()` -- if `foo()` should panic, this box needs -//! to be *shallowly* freed. -//! -//! ### Long-distance jumps -//! -//! In addition to popping a scope, which corresponds to normal control -//! flow exiting the scope, we may also *jump out* of a scope into some -//! earlier scope on the stack. This can occur in response to a `return`, -//! `break`, or `continue` statement, but also in response to panic. In -//! any of these cases, we will generate a series of cleanup blocks for -//! each of the scopes that is exited. So, if the stack contains scopes A -//! ... Z, and we break out of a loop whose corresponding cleanup scope is -//! X, we would generate cleanup blocks for the cleanups in X, Y, and Z. -//! After cleanup is done we would branch to the exit point for scope X. -//! But if panic should occur, we would generate cleanups for all the -//! scopes from A to Z and then resume the unwind process afterwards. -//! -//! To avoid generating tons of code, we cache the cleanup blocks that we -//! create for breaks, returns, unwinds, and other jumps. Whenever a new -//! cleanup is scheduled, though, we must clear these cached blocks. A -//! possible improvement would be to keep the cached blocks but simply -//! generate a new block which performs the additional cleanup and then -//! branches to the existing cached blocks. -//! -//! ### AST and loop cleanup scopes -//! -//! AST cleanup scopes are pushed when we begin and end processing an AST -//! node. They are used to house cleanups related to rvalue temporary that -//! get referenced (e.g., due to an expression like `&Foo()`). Whenever an -//! AST scope is popped, we always trans all the cleanups, adding the cleanup -//! code after the postdominator of the AST node. -//! -//! AST nodes that represent breakable loops also push a loop scope; the -//! loop scope never has any actual cleanups, it's just used to point to -//! the basic blocks where control should flow after a "continue" or -//! "break" statement. Popping a loop scope never generates code. -//! -//! ### Custom cleanup scopes -//! -//! Custom cleanup scopes are used for a variety of purposes. The most -//! common though is to handle temporary byproducts, where cleanup only -//! needs to occur on panic. The general strategy is to push a custom -//! cleanup scope, schedule *shallow* cleanups into the custom scope, and -//! then pop the custom scope (without transing the cleanups) when -//! execution succeeds normally. This way the cleanups are only trans'd on -//! unwind, and only up until the point where execution succeeded, at -//! which time the complete value should be stored in an lvalue or some -//! other place where normal cleanup applies. -//! -//! To spell it out, here is an example. Imagine an expression `box expr`. -//! We would basically: -//! -//! 1. Push a custom cleanup scope C. -//! 2. Allocate the box. -//! 3. Schedule a shallow free in the scope C. -//! 4. 
Trans `expr` into the box. -//! 5. Pop the scope C. -//! 6. Return the box as an rvalue. -//! -//! This way, if a panic occurs while transing `expr`, the custom -//! cleanup scope C is pushed and hence the box will be freed. The trans -//! code for `expr` itself is responsible for freeing any other byproducts -//! that may be in play. - -pub use self::EarlyExitLabel::*; +//! Typically, when a scope is finished, we generate the cleanup code. This +//! corresponds to a normal exit from a block (for example, an expression +//! completing evaluation successfully without panic). use llvm::{BasicBlockRef, ValueRef}; use base; -use build; -use common; -use common::{Block, FunctionContext, LandingPad}; -use debuginfo::{DebugLoc}; +use common::{BlockAndBuilder, FunctionContext, Funclet}; use glue; use type_::Type; use value::Value; use rustc::ty::Ty; pub struct CleanupScope<'tcx> { - // Cleanups to run upon scope exit. - cleanups: Vec>, - - // The debug location any drop calls generated for this scope will be - // associated with. - debug_loc: DebugLoc, + // Cleanup to run upon scope exit. + cleanup: Option>, - cached_early_exits: Vec, - cached_landing_pad: Option, + // Computed on creation if compiling with landing pads (!sess.no_landing_pads) + pub landing_pad: Option, } -#[derive(Copy, Clone, Debug)] -pub struct CustomScopeIndex { - index: usize +#[derive(Copy, Clone)] +pub struct DropValue<'tcx> { + val: ValueRef, + ty: Ty<'tcx>, + skip_dtor: bool, } -#[derive(Copy, Clone, PartialEq, Debug)] -pub enum EarlyExitLabel { - UnwindExit(UnwindKind), -} +impl<'tcx> DropValue<'tcx> { + fn trans<'a>(&self, funclet: Option<&'a Funclet>, bcx: &BlockAndBuilder<'a, 'tcx>) { + glue::call_drop_glue(bcx, self.val, self.ty, self.skip_dtor, funclet) + } -#[derive(Copy, Clone, Debug)] -pub enum UnwindKind { - LandingPad, - CleanupPad(ValueRef), -} + /// Creates a landing pad for the top scope. The landing pad will perform all cleanups necessary + /// for an unwind and then `resume` to continue error propagation: + /// + /// landing_pad -> ... cleanups ... -> [resume] + /// + /// This should only be called once per function, as it creates an alloca for the landingpad. + fn get_landing_pad<'a>(&self, fcx: &FunctionContext<'a, 'tcx>) -> BasicBlockRef { + debug!("get_landing_pad"); + let bcx = fcx.build_new_block("cleanup_unwind"); + let llpersonality = bcx.ccx.eh_personality(); + bcx.set_personality_fn(llpersonality); -#[derive(Copy, Clone)] -pub struct CachedEarlyExit { - label: EarlyExitLabel, - cleanup_block: BasicBlockRef, - last_cleanup: usize, -} + if base::wants_msvc_seh(fcx.ccx.sess()) { + let pad = bcx.cleanup_pad(None, &[]); + let funclet = Some(Funclet::new(pad)); + self.trans(funclet.as_ref(), &bcx); -impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> { - pub fn push_custom_cleanup_scope(&self) -> CustomScopeIndex { - let index = self.scopes_len(); - debug!("push_custom_cleanup_scope(): {}", index); + bcx.cleanup_ret(pad, None); + } else { + // The landing pad return type (the type being propagated). Not sure + // what this represents but it's determined by the personality + // function and this is what the EH proposal example uses. 
+ let llretty = Type::struct_(fcx.ccx, &[Type::i8p(fcx.ccx), Type::i32(fcx.ccx)], false); - // Just copy the debuginfo source location from the enclosing scope - let debug_loc = self.scopes - .borrow() - .last() - .map(|opt_scope| opt_scope.debug_loc) - .unwrap_or(DebugLoc::None); + // The only landing pad clause will be 'cleanup' + let llretval = bcx.landing_pad(llretty, llpersonality, 1, bcx.fcx().llfn); - self.push_scope(CleanupScope::new(debug_loc)); - CustomScopeIndex { index: index } - } + // The landing pad block is a cleanup + bcx.set_cleanup(llretval); - /// Removes the top cleanup scope from the stack without executing its cleanups. The top - /// cleanup scope must be the temporary scope `custom_scope`. - pub fn pop_custom_cleanup_scope(&self, - custom_scope: CustomScopeIndex) { - debug!("pop_custom_cleanup_scope({})", custom_scope.index); - assert!(self.is_valid_to_pop_custom_scope(custom_scope)); - let _ = self.pop_scope(); - } + // Insert cleanup instructions into the cleanup block + self.trans(None, &bcx); - /// Removes the top cleanup scope from the stack, which must be a temporary scope, and - /// generates the code to do its cleanups for normal exit. - pub fn pop_and_trans_custom_cleanup_scope(&self, - bcx: Block<'blk, 'tcx>, - custom_scope: CustomScopeIndex) - -> Block<'blk, 'tcx> { - debug!("pop_and_trans_custom_cleanup_scope({:?})", custom_scope); - assert!(self.is_valid_to_pop_custom_scope(custom_scope)); + if !bcx.sess().target.target.options.custom_unwind_resume { + bcx.resume(llretval); + } else { + let exc_ptr = bcx.extract_value(llretval, 0); + bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], None); + bcx.unreachable(); + } + } - let scope = self.pop_scope(); - self.trans_scope_cleanups(bcx, &scope) + bcx.llbb() } +} - /// Schedules a (deep) drop of `val`, which is a pointer to an instance of - /// `ty` - pub fn schedule_drop_mem(&self, - cleanup_scope: CustomScopeIndex, - val: ValueRef, - ty: Ty<'tcx>) { - if !self.type_needs_drop(ty) { return; } +impl<'a, 'tcx> FunctionContext<'a, 'tcx> { + /// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty` + pub fn schedule_drop_mem(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> { + if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } let drop = DropValue { - is_immediate: false, val: val, ty: ty, skip_dtor: false, }; - debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) skip_dtor={}", - cleanup_scope, - Value(val), - ty, - drop.skip_dtor); + debug!("schedule_drop_mem(val={:?}, ty={:?}) skip_dtor={}", Value(val), ty, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop); + CleanupScope::new(self, drop) } /// Issue #23611: Schedules a (deep) drop of the contents of @@ -228,477 +112,46 @@ pub fn schedule_drop_mem(&self, /// `ty`. The scheduled code handles extracting the discriminant /// and dropping the contents associated with that variant /// *without* executing any associated drop implementation. - pub fn schedule_drop_adt_contents(&self, - cleanup_scope: CustomScopeIndex, - val: ValueRef, - ty: Ty<'tcx>) { + pub fn schedule_drop_adt_contents(&self, val: ValueRef, ty: Ty<'tcx>) -> CleanupScope<'tcx> { // `if` below could be "!contents_needs_drop"; skipping drop // is just an optimization, so sound to be conservative. 
- if !self.type_needs_drop(ty) { return; } + if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); } let drop = DropValue { - is_immediate: false, val: val, ty: ty, skip_dtor: true, }; - debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) skip_dtor={}", - cleanup_scope, - Value(val), - ty, - drop.skip_dtor); + debug!("schedule_drop_adt_contents(val={:?}, ty={:?}) skip_dtor={}", + Value(val), ty, drop.skip_dtor); - self.schedule_clean(cleanup_scope, drop); - } - - /// Schedules a (deep) drop of `val`, which is an instance of `ty` - pub fn schedule_drop_immediate(&self, - cleanup_scope: CustomScopeIndex, - val: ValueRef, - ty: Ty<'tcx>) { - - if !self.type_needs_drop(ty) { return; } - let drop = DropValue { - is_immediate: true, - val: val, - ty: ty, - skip_dtor: false, - }; - - debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) skip_dtor={}", - cleanup_scope, - Value(val), - ty, - drop.skip_dtor); - - self.schedule_clean(cleanup_scope, drop); - } - - /// Schedules a cleanup to occur in the top-most scope, which must be a temporary scope. - fn schedule_clean(&self, custom_scope: CustomScopeIndex, cleanup: DropValue<'tcx>) { - debug!("schedule_clean_in_custom_scope(custom_scope={})", - custom_scope.index); - - assert!(self.is_valid_custom_scope(custom_scope)); - - let mut scopes = self.scopes.borrow_mut(); - let scope = &mut (*scopes)[custom_scope.index]; - scope.cleanups.push(cleanup); - scope.cached_landing_pad = None; - } - - /// Returns true if there are pending cleanups that should execute on panic. - pub fn needs_invoke(&self) -> bool { - self.scopes.borrow().iter().rev().any(|s| s.needs_invoke()) - } - - /// Returns a basic block to branch to in the event of a panic. This block - /// will run the panic cleanups and eventually resume the exception that - /// caused the landing pad to be run. 
- pub fn get_landing_pad(&'blk self) -> BasicBlockRef { - let _icx = base::push_ctxt("get_landing_pad"); - - debug!("get_landing_pad"); - - let orig_scopes_len = self.scopes_len(); - assert!(orig_scopes_len > 0); - - // Remove any scopes that do not have cleanups on panic: - let mut popped_scopes = vec![]; - while !self.top_scope(|s| s.needs_invoke()) { - debug!("top scope does not need invoke"); - popped_scopes.push(self.pop_scope()); - } - - // Check for an existing landing pad in the new topmost scope: - let llbb = self.get_or_create_landing_pad(); - - // Push the scopes we removed back on: - loop { - match popped_scopes.pop() { - Some(scope) => self.push_scope(scope), - None => break - } - } - - assert_eq!(self.scopes_len(), orig_scopes_len); - - return llbb; - } - - fn is_valid_to_pop_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { - self.is_valid_custom_scope(custom_scope) && - custom_scope.index == self.scopes.borrow().len() - 1 - } - - fn is_valid_custom_scope(&self, custom_scope: CustomScopeIndex) -> bool { - let scopes = self.scopes.borrow(); - custom_scope.index < scopes.len() - } - - /// Generates the cleanups for `scope` into `bcx` - fn trans_scope_cleanups(&self, // cannot borrow self, will recurse - bcx: Block<'blk, 'tcx>, - scope: &CleanupScope<'tcx>) -> Block<'blk, 'tcx> { - - let mut bcx = bcx; - if !bcx.unreachable.get() { - for cleanup in scope.cleanups.iter().rev() { - bcx = cleanup.trans(bcx, scope.debug_loc); - } - } - bcx - } - - fn scopes_len(&self) -> usize { - self.scopes.borrow().len() - } - - fn push_scope(&self, scope: CleanupScope<'tcx>) { - self.scopes.borrow_mut().push(scope) - } - - fn pop_scope(&self) -> CleanupScope<'tcx> { - debug!("popping cleanup scope {}, {} scopes remaining", - self.top_scope(|s| s.block_name("")), - self.scopes_len() - 1); - - self.scopes.borrow_mut().pop().unwrap() - } - - fn top_scope(&self, f: F) -> R where F: FnOnce(&CleanupScope<'tcx>) -> R { - f(self.scopes.borrow().last().unwrap()) - } - - /// Used when the caller wishes to jump to an early exit, such as a return, - /// break, continue, or unwind. This function will generate all cleanups - /// between the top of the stack and the exit `label` and return a basic - /// block that the caller can branch to. - /// - /// For example, if the current stack of cleanups were as follows: - /// - /// AST 22 - /// Custom 1 - /// AST 23 - /// Loop 23 - /// Custom 2 - /// AST 24 - /// - /// and the `label` specifies a break from `Loop 23`, then this function - /// would generate a series of basic blocks as follows: - /// - /// Cleanup(AST 24) -> Cleanup(Custom 2) -> break_blk - /// - /// where `break_blk` is the block specified in `Loop 23` as the target for - /// breaks. The return value would be the first basic block in that sequence - /// (`Cleanup(AST 24)`). The caller could then branch to `Cleanup(AST 24)` - /// and it will perform all cleanups and finally branch to the `break_blk`. - fn trans_cleanups_to_exit_scope(&'blk self, - label: EarlyExitLabel) - -> BasicBlockRef { - debug!("trans_cleanups_to_exit_scope label={:?} scopes={}", - label, self.scopes_len()); - - let orig_scopes_len = self.scopes_len(); - let mut prev_llbb; - let mut popped_scopes = vec![]; - let mut skip = 0; - - // First we pop off all the cleanup stacks that are - // traversed until the exit is reached, pushing them - // onto the side vector `popped_scopes`. No code is - // generated at this time. 
- // - // So, continuing the example from above, we would wind up - // with a `popped_scopes` vector of `[AST 24, Custom 2]`. - // (Presuming that there are no cached exits) - loop { - if self.scopes_len() == 0 { - match label { - UnwindExit(val) => { - // Generate a block that will resume unwinding to the - // calling function - let bcx = self.new_block("resume"); - match val { - UnwindKind::LandingPad => { - let addr = self.landingpad_alloca.get() - .unwrap(); - let lp = build::Load(bcx, addr); - base::call_lifetime_end(bcx, addr); - base::trans_unwind_resume(bcx, lp); - } - UnwindKind::CleanupPad(_) => { - let pad = build::CleanupPad(bcx, None, &[]); - build::CleanupRet(bcx, pad, None); - } - } - prev_llbb = bcx.llbb; - break; - } - } - } - - // Pop off the scope, since we may be generating - // unwinding code for it. - let top_scope = self.pop_scope(); - let cached_exit = top_scope.cached_early_exit(label); - popped_scopes.push(top_scope); - - // Check if we have already cached the unwinding of this - // scope for this label. If so, we can stop popping scopes - // and branch to the cached label, since it contains the - // cleanups for any subsequent scopes. - if let Some((exit, last_cleanup)) = cached_exit { - prev_llbb = exit; - skip = last_cleanup; - break; - } - } - - debug!("trans_cleanups_to_exit_scope: popped {} scopes", - popped_scopes.len()); - - // Now push the popped scopes back on. As we go, - // we track in `prev_llbb` the exit to which this scope - // should branch when it's done. - // - // So, continuing with our example, we will start out with - // `prev_llbb` being set to `break_blk` (or possibly a cached - // early exit). We will then pop the scopes from `popped_scopes` - // and generate a basic block for each one, prepending it in the - // series and updating `prev_llbb`. So we begin by popping `Custom 2` - // and generating `Cleanup(Custom 2)`. We make `Cleanup(Custom 2)` - // branch to `prev_llbb == break_blk`, giving us a sequence like: - // - // Cleanup(Custom 2) -> prev_llbb - // - // We then pop `AST 24` and repeat the process, giving us the sequence: - // - // Cleanup(AST 24) -> Cleanup(Custom 2) -> prev_llbb - // - // At this point, `popped_scopes` is empty, and so the final block - // that we return to the user is `Cleanup(AST 24)`. - while let Some(mut scope) = popped_scopes.pop() { - if !scope.cleanups.is_empty() { - let name = scope.block_name("clean"); - debug!("generating cleanups for {}", name); - - let bcx_in = self.new_block(&name[..]); - let exit_label = label.start(bcx_in); - let mut bcx_out = bcx_in; - let len = scope.cleanups.len(); - for cleanup in scope.cleanups.iter().rev().take(len - skip) { - bcx_out = cleanup.trans(bcx_out, scope.debug_loc); - } - skip = 0; - exit_label.branch(bcx_out, prev_llbb); - prev_llbb = bcx_in.llbb; - - scope.add_cached_early_exit(exit_label, prev_llbb, len); - } - self.push_scope(scope); - } - - debug!("trans_cleanups_to_exit_scope: prev_llbb={:?}", prev_llbb); - - assert_eq!(self.scopes_len(), orig_scopes_len); - prev_llbb - } - - /// Creates a landing pad for the top scope, if one does not exist. The - /// landing pad will perform all cleanups necessary for an unwind and then - /// `resume` to continue error propagation: - /// - /// landing_pad -> ... cleanups ... -> [resume] - /// - /// (The cleanups and resume instruction are created by - /// `trans_cleanups_to_exit_scope()`, not in this function itself.) 
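A minimal, hypothetical sketch of how the value-returning cleanup API introduced by this patch might be driven at a call site; `fcx`, `bcx`, `val` and `ty` are assumed to be in scope in the MIR translation code and are not taken from this diff:

    // Hypothetical caller: scheduling now hands back a CleanupScope value
    // instead of pushing onto a per-function scope stack.
    let scope = fcx.schedule_drop_adt_contents(val, ty); // may be CleanupScope::noop()
    // ... translate the code that may unwind while the value is live ...
    scope.trans(&bcx); // emit the scheduled drop (if any) on the normal exit path
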
- fn get_or_create_landing_pad(&'blk self) -> BasicBlockRef { - let pad_bcx; - - debug!("get_or_create_landing_pad"); - - // Check if a landing pad block exists; if not, create one. - { - let mut scopes = self.scopes.borrow_mut(); - let last_scope = scopes.last_mut().unwrap(); - match last_scope.cached_landing_pad { - Some(llbb) => return llbb, - None => { - let name = last_scope.block_name("unwind"); - pad_bcx = self.new_block(&name[..]); - last_scope.cached_landing_pad = Some(pad_bcx.llbb); - } - } - }; - - let llpersonality = pad_bcx.fcx.eh_personality(); - - let val = if base::wants_msvc_seh(self.ccx.sess()) { - // A cleanup pad requires a personality function to be specified, so - // we do that here explicitly (happens implicitly below through - // creation of the landingpad instruction). We then create a - // cleanuppad instruction which has no filters to run cleanup on all - // exceptions. - build::SetPersonalityFn(pad_bcx, llpersonality); - let llretval = build::CleanupPad(pad_bcx, None, &[]); - UnwindKind::CleanupPad(llretval) - } else { - // The landing pad return type (the type being propagated). Not sure - // what this represents but it's determined by the personality - // function and this is what the EH proposal example uses. - let llretty = Type::struct_(self.ccx, - &[Type::i8p(self.ccx), Type::i32(self.ccx)], - false); - - // The only landing pad clause will be 'cleanup' - let llretval = build::LandingPad(pad_bcx, llretty, llpersonality, 1); - - // The landing pad block is a cleanup - build::SetCleanup(pad_bcx, llretval); - - let addr = match self.landingpad_alloca.get() { - Some(addr) => addr, - None => { - let addr = base::alloca(pad_bcx, common::val_ty(llretval), - ""); - base::call_lifetime_start(pad_bcx, addr); - self.landingpad_alloca.set(Some(addr)); - addr - } - }; - build::Store(pad_bcx, llretval, addr); - UnwindKind::LandingPad - }; - - // Generate the cleanup block and branch to it. - let label = UnwindExit(val); - let cleanup_llbb = self.trans_cleanups_to_exit_scope(label); - label.branch(pad_bcx, cleanup_llbb); - - return pad_bcx.llbb; + CleanupScope::new(self, drop) } } impl<'tcx> CleanupScope<'tcx> { - fn new(debug_loc: DebugLoc) -> CleanupScope<'tcx> { + fn new<'a>(fcx: &FunctionContext<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> { CleanupScope { - debug_loc: debug_loc, - cleanups: vec![], - cached_early_exits: vec![], - cached_landing_pad: None, + cleanup: Some(drop_val), + landing_pad: if !fcx.ccx.sess().no_landing_pads() { + Some(drop_val.get_landing_pad(fcx)) + } else { + None + }, } } - fn cached_early_exit(&self, - label: EarlyExitLabel) - -> Option<(BasicBlockRef, usize)> { - self.cached_early_exits.iter().rev(). - find(|e| e.label == label). - map(|e| (e.cleanup_block, e.last_cleanup)) - } - - fn add_cached_early_exit(&mut self, - label: EarlyExitLabel, - blk: BasicBlockRef, - last_cleanup: usize) { - self.cached_early_exits.push( - CachedEarlyExit { label: label, - cleanup_block: blk, - last_cleanup: last_cleanup}); - } - - /// True if this scope has cleanups that need unwinding - fn needs_invoke(&self) -> bool { - self.cached_landing_pad.is_some() || - !self.cleanups.is_empty() - } - - /// Returns a suitable name to use for the basic block that handles this cleanup scope - fn block_name(&self, prefix: &str) -> String { - format!("{}_custom_", prefix) - } -} - -impl EarlyExitLabel { - /// Generates a branch going from `from_bcx` to `to_llbb` where `self` is - /// the exit label attached to the start of `from_bcx`. 
- /// - /// Transitions from an exit label to other exit labels depend on the type - /// of label. For example with MSVC exceptions unwind exit labels will use - /// the `cleanupret` instruction instead of the `br` instruction. - fn branch(&self, from_bcx: Block, to_llbb: BasicBlockRef) { - if let UnwindExit(UnwindKind::CleanupPad(pad)) = *self { - build::CleanupRet(from_bcx, pad, Some(to_llbb)); - } else { - build::Br(from_bcx, to_llbb, DebugLoc::None); - } - } - - /// Generates the necessary instructions at the start of `bcx` to prepare - /// for the same kind of early exit label that `self` is. - /// - /// This function will appropriately configure `bcx` based on the kind of - /// label this is. For UnwindExit labels, the `lpad` field of the block will - /// be set to `Some`, and for MSVC exceptions this function will generate a - /// `cleanuppad` instruction at the start of the block so it may be jumped - /// to in the future (e.g. so this block can be cached as an early exit). - /// - /// Returns a new label which will can be used to cache `bcx` in the list of - /// early exits. - fn start(&self, bcx: Block) -> EarlyExitLabel { - match *self { - UnwindExit(UnwindKind::CleanupPad(..)) => { - let pad = build::CleanupPad(bcx, None, &[]); - bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::msvc(pad)))); - UnwindExit(UnwindKind::CleanupPad(pad)) - } - UnwindExit(UnwindKind::LandingPad) => { - bcx.lpad.set(Some(bcx.fcx.lpad_arena.alloc(LandingPad::gnu()))); - *self - } + pub fn noop() -> CleanupScope<'tcx> { + CleanupScope { + cleanup: None, + landing_pad: None, } } -} -impl PartialEq for UnwindKind { - fn eq(&self, val: &UnwindKind) -> bool { - match (*self, *val) { - (UnwindKind::LandingPad, UnwindKind::LandingPad) | - (UnwindKind::CleanupPad(..), UnwindKind::CleanupPad(..)) => true, - _ => false, + pub fn trans<'a>(self, bcx: &'a BlockAndBuilder<'a, 'tcx>) { + if let Some(cleanup) = self.cleanup { + cleanup.trans(None, &bcx); } } } - -/////////////////////////////////////////////////////////////////////////// -// Cleanup types - -#[derive(Copy, Clone)] -pub struct DropValue<'tcx> { - is_immediate: bool, - val: ValueRef, - ty: Ty<'tcx>, - skip_dtor: bool, -} - -impl<'tcx> DropValue<'tcx> { - fn trans<'blk>(&self, - bcx: Block<'blk, 'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - let skip_dtor = self.skip_dtor; - let _icx = if skip_dtor { - base::push_ctxt("::trans skip_dtor=true") - } else { - base::push_ctxt("::trans skip_dtor=false") - }; - let bcx = if self.is_immediate { - glue::drop_ty_immediate(bcx, self.val, self.ty, debug_loc, self.skip_dtor) - } else { - glue::drop_ty_core(bcx, self.val, self.ty, debug_loc, self.skip_dtor) - }; - bcx - } -} diff --git a/src/librustc_trans/collector.rs b/src/librustc_trans/collector.rs index 3af3ada66b3e9c2ba3c8709f4c4438ae3de58f35..d8c212745376d69225bd2ff424dd1c15f4731f62 100644 --- a/src/librustc_trans/collector.rs +++ b/src/librustc_trans/collector.rs @@ -208,7 +208,7 @@ use syntax_pos::DUMMY_SP; use base::custom_coerce_unsize_info; use context::SharedCrateContext; -use common::{fulfill_obligation, type_is_sized}; +use common::fulfill_obligation; use glue::{self, DropGlueKind}; use monomorphize::{self, Instance}; use util::nodemap::{FxHashSet, FxHashMap, DefIdMap}; @@ -337,7 +337,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>, TransItem::Static(node_id) => { let def_id = scx.tcx().map.local_def_id(node_id); let ty = scx.tcx().item_type(def_id); - let ty = glue::get_drop_glue_type(scx.tcx(), ty); + let 
ty = glue::get_drop_glue_type(scx, ty); neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); recursion_depth_reset = None; @@ -542,7 +542,7 @@ fn visit_lvalue(&mut self, self.param_substs, &ty); assert!(ty.is_normalized_for_trans()); - let ty = glue::get_drop_glue_type(self.scx.tcx(), ty); + let ty = glue::get_drop_glue_type(self.scx, ty); self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); } @@ -678,7 +678,7 @@ fn visit_terminator_kind(&mut self, let operand_ty = monomorphize::apply_param_substs(self.scx, self.param_substs, &mt.ty); - let ty = glue::get_drop_glue_type(tcx, operand_ty); + let ty = glue::get_drop_glue_type(self.scx, operand_ty); self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); } else { bug!("Has the drop_in_place() intrinsic's signature changed?") @@ -804,17 +804,17 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, let field_type = monomorphize::apply_param_substs(scx, substs, &field_type); - let field_type = glue::get_drop_glue_type(scx.tcx(), field_type); + let field_type = glue::get_drop_glue_type(scx, field_type); - if glue::type_needs_drop(scx.tcx(), field_type) { + if scx.type_needs_drop(field_type) { output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type))); } } } ty::TyClosure(def_id, substs) => { for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) { - let upvar_ty = glue::get_drop_glue_type(scx.tcx(), upvar_ty); - if glue::type_needs_drop(scx.tcx(), upvar_ty) { + let upvar_ty = glue::get_drop_glue_type(scx, upvar_ty); + if scx.type_needs_drop(upvar_ty) { output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty))); } } @@ -822,15 +822,15 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, ty::TyBox(inner_type) | ty::TySlice(inner_type) | ty::TyArray(inner_type, _) => { - let inner_type = glue::get_drop_glue_type(scx.tcx(), inner_type); - if glue::type_needs_drop(scx.tcx(), inner_type) { + let inner_type = glue::get_drop_glue_type(scx, inner_type); + if scx.type_needs_drop(inner_type) { output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type))); } } ty::TyTuple(args) => { for arg in args { - let arg = glue::get_drop_glue_type(scx.tcx(), arg); - if glue::type_needs_drop(scx.tcx(), arg) { + let arg = glue::get_drop_glue_type(scx, arg); + if scx.type_needs_drop(arg) { output.push(TransItem::DropGlue(DropGlueKind::Ty(arg))); } } @@ -969,7 +969,7 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. 
})) => { let (inner_source, inner_target) = (a, b); - if !type_is_sized(scx.tcx(), inner_source) { + if !scx.type_is_sized(inner_source) { (inner_source, inner_target) } else { scx.tcx().struct_lockstep_tails(inner_source, inner_target) @@ -1051,7 +1051,7 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a, output.extend(methods); } // Also add the destructor - let dg_type = glue::get_drop_glue_type(scx.tcx(), impl_ty); + let dg_type = glue::get_drop_glue_type(scx, impl_ty); output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type))); } } @@ -1097,7 +1097,7 @@ fn visit_item(&mut self, item: &'v hir::Item) { def_id_to_string(self.scx.tcx(), def_id)); let ty = self.scx.tcx().item_type(def_id); - let ty = glue::get_drop_glue_type(self.scx.tcx(), ty); + let ty = glue::get_drop_glue_type(self.scx, ty); self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty))); } } diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index b1d61cea39ceceaaad7b2164e6c62dcfc1956633..71e17f1ea74051376c83ab4df19be3b06b1e9fac 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -14,24 +14,16 @@ use session::Session; use llvm; -use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; +use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind}; use llvm::{True, False, Bool, OperandBundleDef}; use rustc::hir::def::Def; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; -use rustc::infer::TransNormalize; -use rustc::mir::Mir; use rustc::util::common::MemoizationMap; use middle::lang_items::LangItem; -use rustc::ty::subst::Substs; -use abi::{Abi, FnType}; use base; -use build; use builder::Builder; -use callee::Callee; -use cleanup; use consts; -use debuginfo::{self, DebugLoc}; use declare; use machine; use monomorphize; @@ -40,34 +32,26 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::Layout; use rustc::traits::{self, SelectionContext, Reveal}; -use rustc::ty::fold::TypeFoldable; use rustc::hir; -use arena::TypedArena; use libc::{c_uint, c_char}; use std::borrow::Cow; use std::iter; use std::ops::Deref; use std::ffi::CString; -use std::cell::{Cell, RefCell, Ref}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; -use syntax_pos::{DUMMY_SP, Span}; +use syntax_pos::Span; pub use context::{CrateContext, SharedCrateContext}; -/// Is the type's representation size known at compile time? 
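As a rough, hypothetical illustration of the call-style change applied throughout the collector and common code above, with `scx: &SharedCrateContext`, `ccx: &CrateContext` and `ty: Ty` assumed to be in scope:

    // Sizedness and needs-drop queries now live on SharedCrateContext, which
    // carries a cached empty parameter environment for them.
    if scx.type_needs_drop(ty) {
        // e.g. collect a DropGlue trans item for `ty`
    }
    let sized = scx.type_is_sized(ty);
    // type_is_fat_ptr now takes the CrateContext rather than a bare TyCtxt.
    let fat = type_is_fat_ptr(ccx, ty);
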
-pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx, &tcx.empty_parameter_environment(), DUMMY_SP) -} - -pub fn type_is_fat_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { +pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { match ty.sty { ty::TyRawPtr(ty::TypeAndMut{ty, ..}) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) | ty::TyBox(ty) => { - !type_is_sized(tcx, ty) + !ccx.shared().type_is_sized(ty) } _ => { false @@ -79,14 +63,13 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - use machine::llsize_of_alloc; use type_of::sizing_type_of; - let tcx = ccx.tcx(); let simple = ty.is_scalar() || ty.is_unique() || ty.is_region_ptr() || ty.is_simd(); - if simple && !type_is_fat_ptr(tcx, ty) { + if simple && !type_is_fat_ptr(ccx, ty) { return true; } - if !type_is_sized(tcx, ty) { + if !ccx.shared().type_is_sized(ty) { return false; } match ty.sty { @@ -236,416 +219,139 @@ pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub struct BuilderRef_res { - pub b: BuilderRef, -} - -impl Drop for BuilderRef_res { - fn drop(&mut self) { - unsafe { - llvm::LLVMDisposeBuilder(self.b); - } - } -} - -pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res { - BuilderRef_res { - b: b - } -} - -pub fn validate_substs(substs: &Substs) { - assert!(!substs.needs_infer()); -} - -// Function context. Every LLVM function we create will have one of -// these. +// Function context. Every LLVM function we create will have one of these. pub struct FunctionContext<'a, 'tcx: 'a> { - // The MIR for this function. - pub mir: Option>>, - // The ValueRef returned from a call to llvm::LLVMAddFunction; the // address of the first instruction in the sequence of // instructions for this function that will go in the .text // section of the executable we're generating. pub llfn: ValueRef, - // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv - pub param_env: ty::ParameterEnvironment<'tcx>, - - // A pointer to where to store the return value. If the return type is - // immediate, this points to an alloca in the function. Otherwise, it's a - // pointer to the hidden first parameter of the function. After function - // construction, this should always be Some. - pub llretslotptr: Cell>, - - // These pub elements: "hoisted basic blocks" containing - // administrative activities that have to happen in only one place in - // the function, due to LLVM's quirks. // A marker for the place where we want to insert the function's static // allocas, so that LLVM will coalesce them into a single alloca call. - pub alloca_insert_pt: Cell>, - - // When working with landingpad-based exceptions this value is alloca'd and - // later loaded when using the resume instruction. This ends up being - // critical to chaining landing pads and resuing already-translated - // cleanups. - // - // Note that for cleanuppad-based exceptions this is not used. - pub landingpad_alloca: Cell>, - - // Describes the return/argument LLVM types and their ABI handling. - pub fn_ty: FnType, - - // If this function is being monomorphized, this contains the type - // substitutions used. - pub param_substs: &'tcx Substs<'tcx>, - - // The source span and nesting context where this function comes from, for - // error reporting and symbol generation. - pub span: Option, - - // The arena that blocks are allocated from. - pub block_arena: &'a TypedArena>, - - // The arena that landing pads are allocated from. 
- pub lpad_arena: TypedArena, + alloca_insert_pt: Option, // This function's enclosing crate context. pub ccx: &'a CrateContext<'a, 'tcx>, - // Used and maintained by the debuginfo module. - pub debug_context: debuginfo::FunctionDebugContext, - - // Cleanup scopes. - pub scopes: RefCell>>, + alloca_builder: Builder<'a, 'tcx>, } impl<'a, 'tcx> FunctionContext<'a, 'tcx> { - pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { - self.mir.as_ref().map(Ref::clone).expect("fcx.mir was empty") + /// Create a function context for the given function. + /// Call FunctionContext::get_entry_block for the first entry block. + pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionContext<'a, 'tcx> { + let mut fcx = FunctionContext { + llfn: llfndecl, + alloca_insert_pt: None, + ccx: ccx, + alloca_builder: Builder::with_ccx(ccx), + }; + + let val = { + let entry_bcx = fcx.build_new_block("entry-block"); + let val = entry_bcx.load(C_null(Type::i8p(ccx))); + fcx.alloca_builder.position_at_start(entry_bcx.llbb()); + val + }; + + // Use a dummy instruction as the insertion point for all allocas. + // This is later removed in the drop of FunctionContext. + fcx.alloca_insert_pt = Some(val); + + fcx } - pub fn cleanup(&self) { - unsafe { - llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt - .get() - .unwrap()); - } + pub fn get_entry_block(&'a self) -> BlockAndBuilder<'a, 'tcx> { + BlockAndBuilder::new(unsafe { + llvm::LLVMGetFirstBasicBlock(self.llfn) + }, self) } - pub fn new_block(&'a self, - name: &str) - -> Block<'a, 'tcx> { + pub fn new_block(&'a self, name: &str) -> BasicBlockRef { unsafe { let name = CString::new(name).unwrap(); - let llbb = llvm::LLVMAppendBasicBlockInContext(self.ccx.llcx(), - self.llfn, - name.as_ptr()); - BlockS::new(llbb, self) + llvm::LLVMAppendBasicBlockInContext( + self.ccx.llcx(), + self.llfn, + name.as_ptr() + ) } } - pub fn monomorphize(&self, value: &T) -> T - where T: TransNormalize<'tcx> - { - monomorphize::apply_param_substs(self.ccx.shared(), - self.param_substs, - value) - } - - /// This is the same as `common::type_needs_drop`, except that it - /// may use or update caches within this `FunctionContext`. - pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - self.ccx.tcx().type_needs_drop_given_env(ty, &self.param_env) + pub fn build_new_block(&'a self, name: &str) -> BlockAndBuilder<'a, 'tcx> { + BlockAndBuilder::new(self.new_block(name), self) } - pub fn eh_personality(&self) -> ValueRef { - // The exception handling personality function. - // - // If our compilation unit has the `eh_personality` lang item somewhere - // within it, then we just need to translate that. Otherwise, we're - // building an rlib which will depend on some upstream implementation of - // this function, so we just codegen a generic reference to it. We don't - // specify any of the types for the function, we just make it a symbol - // that LLVM can later use. - // - // Note that MSVC is a little special here in that we don't use the - // `eh_personality` lang item at all. Currently LLVM has support for - // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the - // *name of the personality function* to decide what kind of unwind side - // tables/landing pads to emit. It looks like Dwarf is used by default, - // injecting a dependency on the `_Unwind_Resume` symbol for resuming - // an "exception", but for MSVC we want to force SEH. 
This means that we - // can't actually have the personality function be our standard - // `rust_eh_personality` function, but rather we wired it up to the - // CRT's custom personality function, which forces LLVM to consider - // landing pads as "landing pads for SEH". - let ccx = self.ccx; - let tcx = ccx.tcx(); - match tcx.lang_items.eh_personality() { - Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => { - Callee::def(ccx, def_id, tcx.intern_substs(&[])).reify(ccx) - } - _ => { - if let Some(llpersonality) = ccx.eh_personality().get() { - return llpersonality - } - let name = if base::wants_msvc_seh(ccx.sess()) { - "__CxxFrameHandler3" - } else { - "rust_eh_personality" - }; - let fty = Type::variadic_func(&[], &Type::i32(ccx)); - let f = declare::declare_cfn(ccx, name, fty); - ccx.eh_personality().set(Some(f)); - f - } - } + pub fn alloca(&self, ty: Type, name: &str) -> ValueRef { + self.alloca_builder.dynamic_alloca(ty, name) } +} - // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, - // otherwise declares it as an external function. - pub fn eh_unwind_resume(&self) -> Callee<'tcx> { - use attributes; - let ccx = self.ccx; - let tcx = ccx.tcx(); - assert!(ccx.sess().target.target.options.custom_unwind_resume); - if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { - return Callee::def(ccx, def_id, tcx.intern_substs(&[])); - } - - let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { - unsafety: hir::Unsafety::Unsafe, - abi: Abi::C, - sig: ty::Binder(tcx.mk_fn_sig( - iter::once(tcx.mk_mut_ptr(tcx.types.u8)), - tcx.types.never, - false - )), - })); - - let unwresume = ccx.eh_unwind_resume(); - if let Some(llfn) = unwresume.get() { - return Callee::ptr(llfn, ty); +impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> { + fn drop(&mut self) { + unsafe { + llvm::LLVMInstructionEraseFromParent(self.alloca_insert_pt.unwrap()); } - let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty); - attributes::unwind(llfn, true); - unwresume.set(Some(llfn)); - Callee::ptr(llfn, ty) } } -// Basic block context. We create a block context for each basic block -// (single-entry, single-exit sequence of instructions) we generate from Rust -// code. Each basic block we generate is attached to a function, typically -// with many basic blocks per function. All the basic blocks attached to a -// function are organized as a directed graph. -pub struct BlockS<'blk, 'tcx: 'blk> { +#[must_use] +pub struct BlockAndBuilder<'a, 'tcx: 'a> { // The BasicBlockRef returned from a call to // llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic // block to the function pointed to by llfn. We insert // instructions into that block by way of this block context. // The block pointing to this one in the function's digraph. - pub llbb: BasicBlockRef, - pub terminated: Cell, - pub unreachable: Cell, - - // If this block part of a landing pad, then this is `Some` indicating what - // kind of landing pad its in, otherwise this is none. - pub lpad: Cell>, + llbb: BasicBlockRef, // The function context for the function to which this block is // attached. 
- pub fcx: &'blk FunctionContext<'blk, 'tcx>, -} - -pub type Block<'blk, 'tcx> = &'blk BlockS<'blk, 'tcx>; - -impl<'blk, 'tcx> BlockS<'blk, 'tcx> { - pub fn new(llbb: BasicBlockRef, - fcx: &'blk FunctionContext<'blk, 'tcx>) - -> Block<'blk, 'tcx> { - fcx.block_arena.alloc(BlockS { - llbb: llbb, - terminated: Cell::new(false), - unreachable: Cell::new(false), - lpad: Cell::new(None), - fcx: fcx - }) - } - - pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { - self.fcx.ccx - } - pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> { - self.fcx - } - pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> { - self.fcx.ccx.tcx() - } - pub fn sess(&self) -> &'blk Session { self.fcx.ccx.sess() } - - pub fn lpad(&self) -> Option<&'blk LandingPad> { - self.lpad.get() - } - - pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) { - // FIXME: use an IVar? - self.lpad.set(lpad); - } + fcx: &'a FunctionContext<'a, 'tcx>, - pub fn set_lpad(&self, lpad: Option) { - self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p))) - } - - pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { - self.fcx.mir() - } - - pub fn name(&self, name: ast::Name) -> String { - name.to_string() - } - - pub fn node_id_to_string(&self, id: ast::NodeId) -> String { - self.tcx().map.node_to_string(id).to_string() - } - - pub fn to_str(&self) -> String { - format!("[block {:p}]", self) - } - - pub fn monomorphize(&self, value: &T) -> T - where T: TransNormalize<'tcx> - { - monomorphize::apply_param_substs(self.fcx.ccx.shared(), - self.fcx.param_substs, - value) - } - - pub fn build(&'blk self) -> BlockAndBuilder<'blk, 'tcx> { - BlockAndBuilder::new(self, OwnedBuilder::new_with_ccx(self.ccx())) - } -} - -pub struct OwnedBuilder<'blk, 'tcx: 'blk> { - builder: Builder<'blk, 'tcx> + builder: Builder<'a, 'tcx>, } -impl<'blk, 'tcx> OwnedBuilder<'blk, 'tcx> { - pub fn new_with_ccx(ccx: &'blk CrateContext<'blk, 'tcx>) -> Self { - // Create a fresh builder from the crate context. - let llbuilder = unsafe { - llvm::LLVMCreateBuilderInContext(ccx.llcx()) - }; - OwnedBuilder { - builder: Builder { - llbuilder: llbuilder, - ccx: ccx, - } - } - } -} - -impl<'blk, 'tcx> Drop for OwnedBuilder<'blk, 'tcx> { - fn drop(&mut self) { - unsafe { - llvm::LLVMDisposeBuilder(self.builder.llbuilder); - } - } -} - -pub struct BlockAndBuilder<'blk, 'tcx: 'blk> { - bcx: Block<'blk, 'tcx>, - owned_builder: OwnedBuilder<'blk, 'tcx>, -} - -impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> { - pub fn new(bcx: Block<'blk, 'tcx>, owned_builder: OwnedBuilder<'blk, 'tcx>) -> Self { +impl<'a, 'tcx> BlockAndBuilder<'a, 'tcx> { + pub fn new(llbb: BasicBlockRef, fcx: &'a FunctionContext<'a, 'tcx>) -> Self { + let builder = Builder::with_ccx(fcx.ccx); // Set the builder's position to this block's end. 
- owned_builder.builder.position_at_end(bcx.llbb); + builder.position_at_end(llbb); BlockAndBuilder { - bcx: bcx, - owned_builder: owned_builder, + llbb: llbb, + fcx: fcx, + builder: builder, } } - pub fn with_block(&self, f: F) -> R - where F: FnOnce(Block<'blk, 'tcx>) -> R - { - let result = f(self.bcx); - self.position_at_end(self.bcx.llbb); - result - } - - pub fn map_block(self, f: F) -> Self - where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx> - { - let BlockAndBuilder { bcx, owned_builder } = self; - let bcx = f(bcx); - BlockAndBuilder::new(bcx, owned_builder) - } - pub fn at_start(&self, f: F) -> R - where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R + where F: FnOnce(&BlockAndBuilder<'a, 'tcx>) -> R { - self.position_at_start(self.bcx.llbb); + self.position_at_start(self.llbb); let r = f(self); - self.position_at_end(self.bcx.llbb); + self.position_at_end(self.llbb); r } - // Methods delegated to bcx - - pub fn is_unreachable(&self) -> bool { - self.bcx.unreachable.get() - } - - pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { - self.bcx.ccx() - } - pub fn fcx(&self) -> &'blk FunctionContext<'blk, 'tcx> { - self.bcx.fcx() + pub fn fcx(&self) -> &'a FunctionContext<'a, 'tcx> { + self.fcx } - pub fn tcx(&self) -> TyCtxt<'blk, 'tcx, 'tcx> { - self.bcx.tcx() + pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.ccx.tcx() } - pub fn sess(&self) -> &'blk Session { - self.bcx.sess() + pub fn sess(&self) -> &'a Session { + self.ccx.sess() } pub fn llbb(&self) -> BasicBlockRef { - self.bcx.llbb - } - - pub fn mir(&self) -> Ref<'tcx, Mir<'tcx>> { - self.bcx.mir() - } - - pub fn monomorphize(&self, value: &T) -> T - where T: TransNormalize<'tcx> - { - self.bcx.monomorphize(value) - } - - pub fn set_lpad(&self, lpad: Option) { - self.bcx.set_lpad(lpad) - } - - pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) { - // FIXME: use an IVar? - self.bcx.set_lpad_ref(lpad); - } - - pub fn lpad(&self) -> Option<&'blk LandingPad> { - self.bcx.lpad() + self.llbb } } -impl<'blk, 'tcx> Deref for BlockAndBuilder<'blk, 'tcx> { - type Target = Builder<'blk, 'tcx>; +impl<'a, 'tcx> Deref for BlockAndBuilder<'a, 'tcx> { + type Target = Builder<'a, 'tcx>; fn deref(&self) -> &Self::Target { - &self.owned_builder.builder + &self.builder } } @@ -663,53 +369,33 @@ fn deref(&self) -> &Self::Target { /// When inside of a landing pad, each function call in LLVM IR needs to be /// annotated with which landing pad it's a part of. This is accomplished via /// the `OperandBundleDef` value created for MSVC landing pads. 
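A small, hypothetical usage sketch for the funclet wrapper defined below; `pad` is assumed to be a `cleanuppad` instruction already created through the builder and is not part of this diff:

    // The cleanuppad and its operand bundle are now always present together.
    let funclet = Funclet::new(pad);
    let bundle = funclet.bundle();         // &OperandBundleDef to attach to calls in the funclet
    let cleanuppad = funclet.cleanuppad(); // raw ValueRef, e.g. as the target of a cleanupret
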
-pub struct LandingPad { - cleanuppad: Option, - operand: Option, +pub struct Funclet { + cleanuppad: ValueRef, + operand: OperandBundleDef, } -impl LandingPad { - pub fn gnu() -> LandingPad { - LandingPad { cleanuppad: None, operand: None } - } - - pub fn msvc(cleanuppad: ValueRef) -> LandingPad { - LandingPad { - cleanuppad: Some(cleanuppad), - operand: Some(OperandBundleDef::new("funclet", &[cleanuppad])), +impl Funclet { + pub fn new(cleanuppad: ValueRef) -> Funclet { + Funclet { + cleanuppad: cleanuppad, + operand: OperandBundleDef::new("funclet", &[cleanuppad]), } } - pub fn bundle(&self) -> Option<&OperandBundleDef> { - self.operand.as_ref() - } - - pub fn cleanuppad(&self) -> Option { + pub fn cleanuppad(&self) -> ValueRef { self.cleanuppad } -} -impl Clone for LandingPad { - fn clone(&self) -> LandingPad { - LandingPad { - cleanuppad: self.cleanuppad, - operand: self.cleanuppad.map(|p| { - OperandBundleDef::new("funclet", &[p]) - }), - } + pub fn bundle(&self) -> &OperandBundleDef { + &self.operand } } -pub struct Result<'blk, 'tcx: 'blk> { - pub bcx: Block<'blk, 'tcx>, - pub val: ValueRef -} - -impl<'b, 'tcx> Result<'b, 'tcx> { - pub fn new(bcx: Block<'b, 'tcx>, val: ValueRef) -> Result<'b, 'tcx> { - Result { - bcx: bcx, - val: val, +impl Clone for Funclet { + fn clone(&self) -> Funclet { + Funclet { + cleanuppad: self.cleanuppad, + operand: OperandBundleDef::new("funclet", &[self.cleanuppad]), } } } @@ -1016,43 +702,42 @@ pub fn langcall(tcx: TyCtxt, // all shifts). For 32- and 64-bit types, this matches the semantics // of Java. (See related discussion on #1877 and #10183.) -pub fn build_unchecked_lshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { +pub fn build_unchecked_lshift<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + lhs: ValueRef, + rhs: ValueRef +) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShl, lhs, rhs); // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); - build::Shl(bcx, lhs, rhs, binop_debug_loc) + let rhs = shift_mask_rhs(bcx, rhs); + bcx.shl(lhs, rhs) } -pub fn build_unchecked_rshift<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - lhs_t: Ty<'tcx>, - lhs: ValueRef, - rhs: ValueRef, - binop_debug_loc: DebugLoc) -> ValueRef { +pub fn build_unchecked_rshift<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, lhs_t: Ty<'tcx>, lhs: ValueRef, rhs: ValueRef +) -> ValueRef { let rhs = base::cast_shift_expr_rhs(bcx, hir::BinOp_::BiShr, lhs, rhs); // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bcx, rhs, binop_debug_loc); + let rhs = shift_mask_rhs(bcx, rhs); let is_signed = lhs_t.is_signed(); if is_signed { - build::AShr(bcx, lhs, rhs, binop_debug_loc) + bcx.ashr(lhs, rhs) } else { - build::LShr(bcx, lhs, rhs, binop_debug_loc) + bcx.lshr(lhs, rhs) } } -fn shift_mask_rhs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - rhs: ValueRef, - debug_loc: DebugLoc) -> ValueRef { +fn shift_mask_rhs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, rhs: ValueRef) -> ValueRef { let rhs_llty = val_ty(rhs); - build::And(bcx, rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false), debug_loc) + bcx.and(rhs, shift_mask_val(bcx, rhs_llty, rhs_llty, false)) } -pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llty: Type, - mask_llty: Type, - invert: bool) -> ValueRef { +pub fn shift_mask_val<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + llty: Type, + mask_llty: Type, + invert: bool +) -> ValueRef { let kind = llty.kind(); match kind { 
TypeKind::Integer => { @@ -1066,7 +751,7 @@ pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, }, TypeKind::Vector => { let mask = shift_mask_val(bcx, llty.element_type(), mask_llty.element_type(), invert); - build::VectorSplat(bcx, mask_llty.vector_length(), mask) + bcx.vector_splat(mask_llty.vector_length(), mask) }, _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), } diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index 730a4025a59a8ab3b0bd301302039080e0ae762d..2e2644d91bb6c3584aadeec23a53e7b52cbd6b53 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -16,7 +16,7 @@ use rustc::hir::def_id::DefId; use rustc::hir::map as hir_map; use {debuginfo, machine}; -use base::{self, push_ctxt}; +use base; use trans_item::TransItem; use common::{CrateContext, val_ty}; use declare; @@ -221,7 +221,6 @@ pub fn trans_static(ccx: &CrateContext, attrs: &[ast::Attribute]) -> Result { unsafe { - let _icx = push_ctxt("trans_static"); let def_id = ccx.tcx().map.local_def_id(id); let g = get_static(ccx, def_id); diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index 6435b20eeaa006f164b1a02b9df55a271a42d451..f292a70965004c0a2c3964b043d1842cc1889e5b 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -9,17 +9,16 @@ // except according to those terms. use llvm; -use llvm::{ContextRef, ModuleRef, ValueRef, BuilderRef}; -use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig, - WorkProduct}; +use llvm::{ContextRef, ModuleRef, ValueRef}; +use rustc::dep_graph::{DepGraph, DepNode, DepTrackingMap, DepTrackingMapConfig, WorkProduct}; use middle::cstore::LinkMeta; +use rustc::hir; use rustc::hir::def::ExportMap; use rustc::hir::def_id::DefId; use rustc::traits; -use base; -use builder::Builder; -use common::BuilderRef_res; use debuginfo; +use callee::Callee; +use base; use declare; use glue::DropGlueKind; use monomorphize::Instance; @@ -40,11 +39,13 @@ use std::cell::{Cell, RefCell}; use std::marker::PhantomData; use std::ptr; +use std::iter; use std::rc::Rc; use std::str; use syntax::ast; use syntax::symbol::InternedString; -use abi::FnType; +use syntax_pos::DUMMY_SP; +use abi::{Abi, FnType}; pub struct Stats { pub n_glues_created: Cell, @@ -71,6 +72,7 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> { exported_symbols: NodeSet, link_meta: LinkMeta, tcx: TyCtxt<'a, 'tcx, 'tcx>, + empty_param_env: ty::ParameterEnvironment<'tcx>, stats: Stats, check_overflow: bool, @@ -140,7 +142,6 @@ pub struct LocalCrateContext<'tcx> { int_type: Type, opaque_vec_type: Type, str_slice_type: Type, - builder: BuilderRef_res, /// Holds the LLVM values for closure IDs. closure_vals: RefCell, ValueRef>>, @@ -153,11 +154,6 @@ pub struct LocalCrateContext<'tcx> { intrinsics: RefCell>, - /// Number of LLVM instructions translated into this `LocalCrateContext`. - /// This is used to perform some basic load-balancing to keep all LLVM - /// contexts around the same size. - n_llvm_insns: Cell, - /// Depth of the current type-of computation - used to bail out type_of_depth: Cell, @@ -316,38 +312,6 @@ fn next(&mut self) -> Option> { } } -/// The iterator produced by `CrateContext::maybe_iter`. 
-pub struct CrateContextMaybeIterator<'a, 'tcx: 'a> { - shared: &'a SharedCrateContext<'a, 'tcx>, - local_ccxs: &'a [LocalCrateContext<'tcx>], - index: usize, - single: bool, - origin: usize, -} - -impl<'a, 'tcx> Iterator for CrateContextMaybeIterator<'a, 'tcx> { - type Item = (CrateContext<'a, 'tcx>, bool); - - fn next(&mut self) -> Option<(CrateContext<'a, 'tcx>, bool)> { - if self.index >= self.local_ccxs.len() { - return None; - } - - let index = self.index; - self.index += 1; - if self.single { - self.index = self.local_ccxs.len(); - } - - let ccx = CrateContext { - shared: self.shared, - index: index, - local_ccxs: self.local_ccxs - }; - Some((ccx, index == self.origin)) - } -} - pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { let reloc_model_arg = match sess.opts.cg.relocation_model { Some(ref s) => &s[..], @@ -496,6 +460,7 @@ pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>, export_map: export_map, exported_symbols: exported_symbols, link_meta: link_meta, + empty_param_env: tcx.empty_parameter_environment(), tcx: tcx, stats: Stats { n_glues_created: Cell::new(0), @@ -516,6 +481,14 @@ pub fn new(tcx: TyCtxt<'b, 'tcx, 'tcx>, } } + pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + self.tcx.type_needs_drop_given_env(ty, &self.empty_param_env) + } + + pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + ty.is_sized(self.tcx, &self.empty_param_env, DUMMY_SP) + } + pub fn metadata_llmod(&self) -> ModuleRef { self.metadata_llmod } @@ -638,14 +611,12 @@ fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>, int_type: Type::from_ref(ptr::null_mut()), opaque_vec_type: Type::from_ref(ptr::null_mut()), str_slice_type: Type::from_ref(ptr::null_mut()), - builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)), closure_vals: RefCell::new(FxHashMap()), dbg_cx: dbg_cx, eh_personality: Cell::new(None), eh_unwind_resume: Cell::new(None), rust_try_fn: Cell::new(None), intrinsics: RefCell::new(FxHashMap()), - n_llvm_insns: Cell::new(0), type_of_depth: Cell::new(0), symbol_map: symbol_map, local_gen_sym_counter: Cell::new(0), @@ -671,10 +642,6 @@ fn new<'a>(shared: &SharedCrateContext<'a, 'tcx>, local_ccx.opaque_vec_type = opaque_vec_type; local_ccx.str_slice_type = str_slice_ty; - if shared.tcx.sess.count_llvm_insns() { - base::init_insn_ctxt() - } - local_ccx } } @@ -703,26 +670,10 @@ pub fn shared(&self) -> &'b SharedCrateContext<'b, 'tcx> { self.shared } - pub fn local(&self) -> &'b LocalCrateContext<'tcx> { + fn local(&self) -> &'b LocalCrateContext<'tcx> { &self.local_ccxs[self.index] } - /// Either iterate over only `self`, or iterate over all `CrateContext`s in - /// the `SharedCrateContext`. The iterator produces `(ccx, is_origin)` - /// pairs, where `is_origin` is `true` if `ccx` is `self` and `false` - /// otherwise. This method is useful for avoiding code duplication in - /// cases where it may or may not be necessary to translate code into every - /// context. 
- pub fn maybe_iter(&self, iter_all: bool) -> CrateContextMaybeIterator<'b, 'tcx> { - CrateContextMaybeIterator { - shared: self.shared, - index: if iter_all { 0 } else { self.index }, - single: !iter_all, - origin: self.index, - local_ccxs: self.local_ccxs, - } - } - pub fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { self.shared.tcx } @@ -731,14 +682,6 @@ pub fn sess<'a>(&'a self) -> &'a Session { &self.shared.tcx.sess } - pub fn builder<'a>(&'a self) -> Builder<'a, 'tcx> { - Builder::new(self) - } - - pub fn raw_builder<'a>(&'a self) -> BuilderRef { - self.local().builder.b - } - pub fn get_intrinsic(&self, key: &str) -> ValueRef { if let Some(v) = self.intrinsics().borrow().get(key).cloned() { return v; @@ -886,14 +829,6 @@ pub fn dbg_cx<'a>(&'a self) -> &'a Option> { &self.local().dbg_cx } - pub fn eh_personality<'a>(&'a self) -> &'a Cell> { - &self.local().eh_personality - } - - pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell> { - &self.local().eh_unwind_resume - } - pub fn rust_try_fn<'a>(&'a self) -> &'a Cell> { &self.local().rust_try_fn } @@ -902,10 +837,6 @@ fn intrinsics<'a>(&'a self) -> &'a RefCell> { &self.local().intrinsics } - pub fn count_llvm_insn(&self) { - self.local().n_llvm_insns.set(self.local().n_llvm_insns.get() + 1); - } - pub fn obj_size_bound(&self) -> u64 { self.tcx().data_layout.obj_size_bound() } @@ -974,6 +905,82 @@ pub fn generate_local_symbol_name(&self, prefix: &str) -> String { base_n::push_str(idx as u64, base_n::ALPHANUMERIC_ONLY, &mut name); name } + + pub fn eh_personality(&self) -> ValueRef { + // The exception handling personality function. + // + // If our compilation unit has the `eh_personality` lang item somewhere + // within it, then we just need to translate that. Otherwise, we're + // building an rlib which will depend on some upstream implementation of + // this function, so we just codegen a generic reference to it. We don't + // specify any of the types for the function, we just make it a symbol + // that LLVM can later use. + // + // Note that MSVC is a little special here in that we don't use the + // `eh_personality` lang item at all. Currently LLVM has support for + // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the + // *name of the personality function* to decide what kind of unwind side + // tables/landing pads to emit. It looks like Dwarf is used by default, + // injecting a dependency on the `_Unwind_Resume` symbol for resuming + // an "exception", but for MSVC we want to force SEH. This means that we + // can't actually have the personality function be our standard + // `rust_eh_personality` function, but rather we wired it up to the + // CRT's custom personality function, which forces LLVM to consider + // landing pads as "landing pads for SEH". + if let Some(llpersonality) = self.local().eh_personality.get() { + return llpersonality + } + let tcx = self.tcx(); + let llfn = match tcx.lang_items.eh_personality() { + Some(def_id) if !base::wants_msvc_seh(self.sess()) => { + Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self) + } + _ => { + let name = if base::wants_msvc_seh(self.sess()) { + "__CxxFrameHandler3" + } else { + "rust_eh_personality" + }; + let fty = Type::variadic_func(&[], &Type::i32(self)); + declare::declare_cfn(self, name, fty) + } + }; + self.local().eh_personality.set(Some(llfn)); + llfn + } + + // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, + // otherwise declares it as an external function. 
+ pub fn eh_unwind_resume(&self) -> ValueRef { + use attributes; + let unwresume = &self.local().eh_unwind_resume; + if let Some(llfn) = unwresume.get() { + return llfn; + } + + let tcx = self.tcx(); + assert!(self.sess().target.target.options.custom_unwind_resume); + if let Some(def_id) = tcx.lang_items.eh_unwind_resume() { + let llfn = Callee::def(self, def_id, tcx.intern_substs(&[])).reify(self); + unwresume.set(Some(llfn)); + return llfn; + } + + let ty = tcx.mk_fn_ptr(tcx.mk_bare_fn(ty::BareFnTy { + unsafety: hir::Unsafety::Unsafe, + abi: Abi::C, + sig: ty::Binder(tcx.mk_fn_sig( + iter::once(tcx.mk_mut_ptr(tcx.types.u8)), + tcx.types.never, + false + )), + })); + + let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty); + attributes::unwind(llfn, true); + unwresume.set(Some(llfn)); + llfn + } } pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>); diff --git a/src/librustc_trans/debuginfo/create_scope_map.rs b/src/librustc_trans/debuginfo/create_scope_map.rs index e0c1a80be394d047f7b7b3e78044c7a3080ddfd4..f5a8eeacf38adba37051666d798402945cb92170 100644 --- a/src/librustc_trans/debuginfo/create_scope_map.rs +++ b/src/librustc_trans/debuginfo/create_scope_map.rs @@ -44,8 +44,8 @@ pub fn is_valid(&self) -> bool { /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. -pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec { - let mir = fcx.mir(); +pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext) + -> IndexVec { let null_scope = MirDebugScope { scope_metadata: ptr::null_mut(), file_start_pos: BytePos(0), @@ -53,8 +53,8 @@ pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec data.fn_metadata, + let fn_metadata = match *debug_context { + FunctionDebugContext::RegularContext(ref data) => data.fn_metadata, FunctionDebugContext::DebugInfoDisabled | FunctionDebugContext::FunctionWithoutDebugInfo => { return scopes; diff --git a/src/librustc_trans/debuginfo/gdb.rs b/src/librustc_trans/debuginfo/gdb.rs index 8f937d3fe25cbd4e6716b22e0c3a9c4baa28bb1b..e8728a39993081bc3bcc45966b43537922ea2b37 100644 --- a/src/librustc_trans/debuginfo/gdb.rs +++ b/src/librustc_trans/debuginfo/gdb.rs @@ -13,37 +13,26 @@ use llvm; use common::{C_bytes, CrateContext, C_i32}; +use builder::Builder; use declare; use type_::Type; use session::config::NoDebugInfo; -use std::ffi::CString; use std::ptr; use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext) { +pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext, builder: &Builder) { if needs_gdb_debug_scripts_section(ccx) { - let empty = CString::new("").unwrap(); - let gdb_debug_scripts_section_global = - get_or_insert_gdb_debug_scripts_section_global(ccx); + let gdb_debug_scripts_section_global = get_or_insert_gdb_debug_scripts_section_global(ccx); + // Load just the first byte as that's all that's necessary to force + // LLVM to keep around the reference to the global. 
+ let indices = [C_i32(ccx, 0), C_i32(ccx, 0)]; + let element = builder.inbounds_gep(gdb_debug_scripts_section_global, &indices); + let volative_load_instruction = builder.volatile_load(element); unsafe { - // Load just the first byte as that's all that's necessary to force - // LLVM to keep around the reference to the global. - let indices = [C_i32(ccx, 0), C_i32(ccx, 0)]; - let element = - llvm::LLVMBuildInBoundsGEP(ccx.raw_builder(), - gdb_debug_scripts_section_global, - indices.as_ptr(), - indices.len() as ::libc::c_uint, - empty.as_ptr()); - let volative_load_instruction = - llvm::LLVMBuildLoad(ccx.raw_builder(), - element, - empty.as_ptr()); - llvm::LLVMSetVolatile(volative_load_instruction, llvm::True); llvm::LLVMSetAlignment(volative_load_instruction, 1); } } diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 4e511c05840d1aeb1ac09e9cdd355c26c351ce24..86099d241df686bec35b2594f87cb3ef78e990ab 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -27,7 +27,7 @@ use rustc::ty::subst::Substs; use abi::Abi; -use common::{CrateContext, FunctionContext, Block, BlockAndBuilder}; +use common::{CrateContext, BlockAndBuilder}; use monomorphize::{self, Instance}; use rustc::ty::{self, Ty}; use rustc::mir; @@ -55,6 +55,7 @@ pub use self::source_loc::start_emitting_source_locations; pub use self::metadata::create_global_var_metadata; pub use self::metadata::extend_scope_to_file; +pub use self::source_loc::set_source_location; #[allow(non_upper_case_globals)] const DW_TAG_auto_variable: c_uint = 0x100; @@ -65,7 +66,6 @@ pub struct CrateDebugContext<'tcx> { llcontext: ContextRef, builder: DIBuilderRef, - current_debug_location: Cell, created_files: RefCell>, created_enum_disr_types: RefCell>, @@ -83,40 +83,33 @@ pub fn new(llmod: ModuleRef) -> CrateDebugContext<'tcx> { let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) }; // DIBuilder inherits context from the module, so we'd better use the same one let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) }; - return CrateDebugContext { + CrateDebugContext { llcontext: llcontext, builder: builder, - current_debug_location: Cell::new(InternalDebugLocation::UnknownLocation), created_files: RefCell::new(FxHashMap()), created_enum_disr_types: RefCell::new(FxHashMap()), type_map: RefCell::new(TypeMap::new()), namespace_map: RefCell::new(DefIdMap()), composite_types_completed: RefCell::new(FxHashSet()), - }; + } } } pub enum FunctionDebugContext { - RegularContext(Box), + RegularContext(FunctionDebugContextData), DebugInfoDisabled, FunctionWithoutDebugInfo, } impl FunctionDebugContext { - fn get_ref<'a>(&'a self, - span: Span) - -> &'a FunctionDebugContextData { + fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData { match *self { - FunctionDebugContext::RegularContext(box ref data) => data, + FunctionDebugContext::RegularContext(ref data) => data, FunctionDebugContext::DebugInfoDisabled => { - span_bug!(span, - "{}", - FunctionDebugContext::debuginfo_disabled_message()); + span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message()); } FunctionDebugContext::FunctionWithoutDebugInfo => { - span_bug!(span, - "{}", - FunctionDebugContext::should_be_ignored_message()); + span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message()); } } } @@ -134,7 +127,6 @@ fn should_be_ignored_message() -> &'static str { pub struct FunctionDebugContextData { fn_metadata: DISubprogram, source_locations_enabled: Cell, - 
source_location_override: Cell, } pub enum VariableAccess<'a> { @@ -197,18 +189,6 @@ pub fn finalize(cx: &CrateContext) { }; } -/// Creates a function-specific debug context for a function w/o debuginfo. -pub fn empty_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>) - -> FunctionDebugContext { - if cx.sess().opts.debuginfo == NoDebugInfo { - return FunctionDebugContext::DebugInfoDisabled; - } - - // Clear the debug location so we don't assign them in the function prelude. - source_loc::set_debug_location(cx, None, UnknownLocation); - FunctionDebugContext::FunctionWithoutDebugInfo -} - /// Creates the function-specific debug context. /// /// Returns the FunctionDebugContext for the function which holds state needed @@ -225,15 +205,18 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, return FunctionDebugContext::DebugInfoDisabled; } - // Clear the debug location so we don't assign them in the function prelude. - // Do this here already, in case we do an early exit from this function. - source_loc::set_debug_location(cx, None, UnknownLocation); + for attr in cx.tcx().get_attrs(instance.def).iter() { + if attr.check_name("no_debug") { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } + } let containing_scope = get_containing_scope(cx, instance); let span = mir.span; // This can be the case for functions inlined from another crate if span == syntax_pos::DUMMY_SP { + // FIXME(simulacrum): Probably can't happen; remove. return FunctionDebugContext::FunctionWithoutDebugInfo; } @@ -293,10 +276,9 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }; // Initialize fn debug context (including scope map and namespace map) - let fn_debug_context = box FunctionDebugContextData { + let fn_debug_context = FunctionDebugContextData { fn_metadata: fn_metadata, source_locations_enabled: Cell::new(false), - source_location_override: Cell::new(false), }; return FunctionDebugContext::RegularContext(fn_debug_context); @@ -441,14 +423,15 @@ fn get_containing_scope<'ccx, 'tcx>(cx: &CrateContext<'ccx, 'tcx>, } } -pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: DIScope, - variable_access: VariableAccess, - variable_kind: VariableKind, - span: Span) { - let cx: &CrateContext = bcx.ccx(); +pub fn declare_local<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + dbg_context: &FunctionDebugContext, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: DIScope, + variable_access: VariableAccess, + variable_kind: VariableKind, + span: Span) { + let cx = bcx.ccx; let file = span_start(cx, span).file; let filename = file.name.clone(); @@ -483,10 +466,10 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, align as u64, ) }; - source_loc::set_debug_location(cx, None, + source_loc::set_debug_location(bcx, InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); unsafe { - let debug_loc = llvm::LLVMGetCurrentDebugLocation(cx.raw_builder()); + let debug_loc = llvm::LLVMGetCurrentDebugLocation(bcx.llbuilder); let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( DIB(cx), alloca, @@ -494,38 +477,18 @@ pub fn declare_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, address_operations.as_ptr(), address_operations.len() as c_uint, debug_loc, - bcx.llbb); + bcx.llbb()); - llvm::LLVMSetInstDebugLocation(::build::B(bcx).llbuilder, instr); + llvm::LLVMSetInstDebugLocation(bcx.llbuilder, instr); } } } match variable_kind { ArgumentVariable(_) | 
CapturedVariable => { - assert!(!bcx.fcx - .debug_context - .get_ref(span) - .source_locations_enabled - .get()); - source_loc::set_debug_location(cx, None, UnknownLocation); + assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + source_loc::set_debug_location(bcx, UnknownLocation); } _ => { /* nothing to do */ } } } - -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub enum DebugLoc { - ScopeAt(DIScope, Span), - None -} - -impl DebugLoc { - pub fn apply(self, fcx: &FunctionContext) { - source_loc::set_source_location(fcx, None, self); - } - - pub fn apply_to_bcx(self, bcx: &BlockAndBuilder) { - source_loc::set_source_location(bcx.fcx(), Some(bcx), self); - } -} diff --git a/src/librustc_trans/debuginfo/source_loc.rs b/src/librustc_trans/debuginfo/source_loc.rs index 1aee27c144a36f9c0a9c8ed1ccc4f1f8ee3cf7a5..e02c8be19a2f477315c2e1cdf671ee205c7f6796 100644 --- a/src/librustc_trans/debuginfo/source_loc.rs +++ b/src/librustc_trans/debuginfo/source_loc.rs @@ -11,57 +11,40 @@ use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; -use super::metadata::{UNKNOWN_COLUMN_NUMBER}; -use super::{FunctionDebugContext, DebugLoc}; +use super::metadata::UNKNOWN_COLUMN_NUMBER; +use super::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; -use common::{CrateContext, FunctionContext}; use libc::c_uint; use std::ptr; -use syntax_pos::Pos; +use syntax_pos::{Span, Pos}; /// Sets the current debug location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). -pub fn set_source_location(fcx: &FunctionContext, - builder: Option<&Builder>, - debug_loc: DebugLoc) { - let builder = builder.map(|b| b.llbuilder); - let function_debug_context = match fcx.debug_context { +pub fn set_source_location( + debug_context: &FunctionDebugContext, builder: &Builder, scope: DIScope, span: Span +) { + let function_debug_context = match *debug_context { FunctionDebugContext::DebugInfoDisabled => return, FunctionDebugContext::FunctionWithoutDebugInfo => { - set_debug_location(fcx.ccx, builder, UnknownLocation); + set_debug_location(builder, UnknownLocation); return; } - FunctionDebugContext::RegularContext(box ref data) => data + FunctionDebugContext::RegularContext(ref data) => data }; - if function_debug_context.source_location_override.get() { - // Just ignore any attempts to set a new debug location while - // the override is active. - return; - } - let dbg_loc = if function_debug_context.source_locations_enabled.get() { - let (scope, span) = match debug_loc { - DebugLoc::ScopeAt(scope, span) => (scope, span), - DebugLoc::None => { - set_debug_location(fcx.ccx, builder, UnknownLocation); - return; - } - }; - - debug!("set_source_location: {}", - fcx.ccx.sess().codemap().span_to_string(span)); - let loc = span_start(fcx.ccx, span); + debug!("set_source_location: {}", builder.ccx.sess().codemap().span_to_string(span)); + let loc = span_start(builder.ccx, span); InternalDebugLocation::new(scope, loc.line, loc.col.to_usize()) } else { UnknownLocation }; - set_debug_location(fcx.ccx, builder, dbg_loc); + set_debug_location(builder, dbg_loc); } /// Enables emitting source locations for the given functions. @@ -70,9 +53,9 @@ pub fn set_source_location(fcx: &FunctionContext, /// they are disabled when beginning to translate a new function. This functions /// switches source location emitting on and must therefore be called before the /// first real statement/expression of the function is translated. 
-pub fn start_emitting_source_locations(fcx: &FunctionContext) { - match fcx.debug_context { - FunctionDebugContext::RegularContext(box ref data) => { +pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { + match *dbg_context { + FunctionDebugContext::RegularContext(ref data) => { data.source_locations_enabled.set(true) }, _ => { /* safe to ignore */ } @@ -96,15 +79,7 @@ pub fn new(scope: DIScope, line: usize, col: usize) -> InternalDebugLocation { } } -pub fn set_debug_location(cx: &CrateContext, - builder: Option, - debug_location: InternalDebugLocation) { - if builder.is_none() { - if debug_location == debug_context(cx).current_debug_location.get() { - return; - } - } - +pub fn set_debug_location(builder: &Builder, debug_location: InternalDebugLocation) { let metadata_node = match debug_location { KnownLocation { scope, line, .. } => { // Always set the column to zero like Clang and GCC @@ -113,7 +88,7 @@ pub fn set_debug_location(cx: &CrateContext, unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation( - debug_context(cx).llcontext, + debug_context(builder.ccx).llcontext, line as c_uint, col as c_uint, scope, @@ -126,12 +101,7 @@ pub fn set_debug_location(cx: &CrateContext, } }; - if builder.is_none() { - debug_context(cx).current_debug_location.set(debug_location); - } - - let builder = builder.unwrap_or_else(|| cx.raw_builder()); unsafe { - llvm::LLVMSetCurrentDebugLocation(builder, metadata_node); + llvm::LLVMSetCurrentDebugLocation(builder.llbuilder, metadata_node); } } diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 90bc29c39e9b50ed7fd60ed531330feccb72ab8a..5fb4a0e088f6293c26390662f45501519c89fdec 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -19,13 +19,11 @@ use middle::lang_items::ExchangeFreeFnLangItem; use rustc::ty::subst::{Substs}; use rustc::traits; -use rustc::ty::{self, AdtKind, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::{self, AdtKind, Ty, TypeFoldable}; use adt; use base::*; -use build::*; -use callee::{Callee}; +use callee::Callee; use common::*; -use debuginfo::DebugLoc; use machine::*; use monomorphize; use trans_item::TransItem; @@ -34,69 +32,50 @@ use type_::Type; use value::Value; use Disr; +use cleanup::CleanupScope; -use arena::TypedArena; use syntax_pos::DUMMY_SP; -pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v: ValueRef, - size: ValueRef, - align: ValueRef, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("trans_exchange_free"); - +pub fn trans_exchange_free_dyn<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + v: ValueRef, + size: ValueRef, + align: ValueRef +) { let def_id = langcall(bcx.tcx(), None, "", ExchangeFreeFnLangItem); - let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align]; - Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[])) - .call(bcx, debug_loc, &args, None).bcx -} + let args = [bcx.pointercast(v, Type::i8p(bcx.ccx)), size, align]; + let callee = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[])); -pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - v: ValueRef, - size: u64, - align: u32, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - trans_exchange_free_dyn(cx, - v, - C_uint(cx.ccx(), size), - C_uint(cx.ccx(), align), - debug_loc) + let ccx = bcx.ccx; + let fn_ty = callee.direct_fn_type(ccx, &[]); + + let llret = bcx.call(callee.reify(ccx), &args[..], None); + fn_ty.apply_attrs_callsite(llret); } -pub fn trans_exchange_free_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - ptr: 
ValueRef, - content_ty: Ty<'tcx>, - debug_loc: DebugLoc) - -> Block<'blk, 'tcx> { - assert!(type_is_sized(bcx.ccx().tcx(), content_ty)); - let sizing_type = sizing_type_of(bcx.ccx(), content_ty); - let content_size = llsize_of_alloc(bcx.ccx(), sizing_type); +pub fn trans_exchange_free_ty<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, ptr: ValueRef, content_ty: Ty<'tcx> +) { + assert!(bcx.ccx.shared().type_is_sized(content_ty)); + let sizing_type = sizing_type_of(bcx.ccx, content_ty); + let content_size = llsize_of_alloc(bcx.ccx, sizing_type); // `Box` does not allocate. if content_size != 0 { - let content_align = align_of(bcx.ccx(), content_ty); - trans_exchange_free(bcx, ptr, content_size, content_align, debug_loc) - } else { - bcx + let content_align = align_of(bcx.ccx, content_ty); + let ccx = bcx.ccx; + trans_exchange_free_dyn(bcx, ptr, C_uint(ccx, content_size), C_uint(ccx, content_align)); } } -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>) -> bool { - tcx.type_needs_drop_given_env(ty, &tcx.empty_parameter_environment()) -} - -pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - t: Ty<'tcx>) -> Ty<'tcx> { +pub fn get_drop_glue_type<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> { assert!(t.is_normalized_for_trans()); - let t = tcx.erase_regions(&t); + let t = scx.tcx().erase_regions(&t); // Even if there is no dtor for t, there might be one deeper down and we // might need to pass in the vtable ptr. - if !type_is_sized(tcx, t) { + if !scx.type_is_sized(t) { return t; } @@ -109,17 +88,16 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // returned `tcx.types.i8` does not appear unsound. The impact on // code quality is unknown at this time.) - if !type_needs_drop(tcx, t) { - return tcx.types.i8; + if !scx.type_needs_drop(t) { + return scx.tcx().types.i8; } match t.sty { - ty::TyBox(typ) if !type_needs_drop(tcx, typ) - && type_is_sized(tcx, typ) => { - tcx.infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| { + ty::TyBox(typ) if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) => { + scx.tcx().infer_ctxt(None, None, traits::Reveal::All).enter(|infcx| { let layout = t.layout(&infcx).unwrap(); - if layout.size(&tcx.data_layout).bytes() == 0 { + if layout.size(&scx.tcx().data_layout).bytes() == 0 { // `Box` does not allocate. - tcx.types.i8 + scx.tcx().types.i8 } else { t } @@ -129,56 +107,37 @@ pub fn get_drop_glue_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub fn drop_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v: ValueRef, - t: Ty<'tcx>, - debug_loc: DebugLoc) -> Block<'blk, 'tcx> { - drop_ty_core(bcx, v, t, debug_loc, false) +fn drop_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, t: Ty<'tcx>) { + call_drop_glue(bcx, v, t, false, None) } -pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v: ValueRef, - t: Ty<'tcx>, - debug_loc: DebugLoc, - skip_dtor: bool) - -> Block<'blk, 'tcx> { +pub fn call_drop_glue<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + v: ValueRef, + t: Ty<'tcx>, + skip_dtor: bool, + funclet: Option<&'a Funclet>, +) { // NB: v is an *alias* of type t here, not a direct value. 
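The `i8` fallback above exists so that every type whose drop is a no-op can share one trivial glue function. The same classification is what the `needs_drop` intrinsic (handled later in intrinsic.rs) reports, and what `std::mem::needs_drop` exposes on a current toolchain:

```rust
fn main() {
    // Types with no-op drop all collapse to a single representative (i8) in
    // get_drop_glue_type; needs_drop is the observable side of that split.
    assert!(!std::mem::needs_drop::<u32>());
    assert!(!std::mem::needs_drop::<[f64; 16]>());
    assert!(std::mem::needs_drop::<String>());
    assert!(std::mem::needs_drop::<Vec<u8>>());
}
```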
- debug!("drop_ty_core(t={:?}, skip_dtor={})", t, skip_dtor); - let _icx = push_ctxt("drop_ty"); - if bcx.fcx.type_needs_drop(t) { - let ccx = bcx.ccx(); + debug!("call_drop_glue(t={:?}, skip_dtor={})", t, skip_dtor); + if bcx.ccx.shared().type_needs_drop(t) { + let ccx = bcx.ccx; let g = if skip_dtor { DropGlueKind::TyContents(t) } else { DropGlueKind::Ty(t) }; let glue = get_drop_glue_core(ccx, g); - let glue_type = get_drop_glue_type(ccx.tcx(), t); + let glue_type = get_drop_glue_type(ccx.shared(), t); let ptr = if glue_type != t { - PointerCast(bcx, v, type_of(ccx, glue_type).ptr_to()) + bcx.pointercast(v, type_of(ccx, glue_type).ptr_to()) } else { v }; // No drop-hint ==> call standard drop glue - Call(bcx, glue, &[ptr], debug_loc); + bcx.call(glue, &[ptr], funclet.map(|b| b.bundle())); } - bcx -} - -pub fn drop_ty_immediate<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v: ValueRef, - t: Ty<'tcx>, - debug_loc: DebugLoc, - skip_dtor: bool) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("drop_ty_immediate"); - let vp = alloc_ty(bcx, t, ""); - call_lifetime_start(bcx, vp); - store_ty(bcx, v, vp, t); - let bcx = drop_ty_core(bcx, vp, t, debug_loc, skip_dtor); - call_lifetime_end(bcx, vp); - bcx } pub fn get_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ValueRef { @@ -212,9 +171,8 @@ pub fn map_ty(&self, mut f: F) -> DropGlueKind<'tcx> where F: FnMut(Ty<'tcx>) } } -fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - g: DropGlueKind<'tcx>) -> ValueRef { - let g = g.map_ty(|t| get_drop_glue_type(ccx.tcx(), t)); +fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) -> ValueRef { + let g = g.map_ty(|t| get_drop_glue_type(ccx.shared(), t)); match ccx.drop_glues().borrow().get(&g) { Some(&(glue, _)) => glue, None => { @@ -226,17 +184,12 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } } -pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - g: DropGlueKind<'tcx>) { - let tcx = ccx.tcx(); - assert_eq!(g.ty(), get_drop_glue_type(tcx, g.ty())); - let (llfn, fn_ty) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); - - let (arena, fcx): (TypedArena<_>, FunctionContext); - arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &arena); +pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKind<'tcx>) { + assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty())); + let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone(); - let bcx = fcx.init(false); + let fcx = FunctionContext::new(ccx, llfn); + let bcx = fcx.get_entry_block(); ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1); // All glue functions take values passed *by alias*; this is a @@ -247,19 +200,91 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // llfn is expected be declared to take a parameter of the appropriate // type, so we don't need to explicitly cast the function parameter. - let bcx = make_drop_glue(bcx, get_param(llfn, 0), g); - fcx.finish(bcx, DebugLoc::None); + // NB: v0 is an *alias* of type t here, not a direct value. + // Only drop the value when it ... well, we used to check for + // non-null, (and maybe we need to continue doing so), but we now + // must definitely check for special bit-patterns corresponding to + // the special dtor markings. 
+ let v0 = get_param(llfn, 0); + let t = g.ty(); + + let skip_dtor = match g { + DropGlueKind::Ty(_) => false, + DropGlueKind::TyContents(_) => true + }; + + let bcx = match t.sty { + ty::TyBox(content_ty) => { + // Support for TyBox is built-in and its drop glue is + // special. It may move to library and have Drop impl. As + // a safe-guard, assert TyBox not used with TyContents. + assert!(!skip_dtor); + if !bcx.ccx.shared().type_is_sized(content_ty) { + let llval = get_dataptr(&bcx, v0); + let llbox = bcx.load(llval); + drop_ty(&bcx, v0, content_ty); + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + let info = get_meta(&bcx, v0); + let info = bcx.load(info); + let (llsize, llalign) = size_and_align_of_dst(&bcx, content_ty, info); + + // `Box` does not allocate. + let needs_free = bcx.icmp(llvm::IntNE, llsize, C_uint(bcx.ccx, 0u64)); + if const_to_opt_uint(needs_free) == Some(0) { + bcx + } else { + let next_cx = bcx.fcx().build_new_block("next"); + let cond_cx = bcx.fcx().build_new_block("cond"); + bcx.cond_br(needs_free, cond_cx.llbb(), next_cx.llbb()); + trans_exchange_free_dyn(&cond_cx, llbox, llsize, llalign); + cond_cx.br(next_cx.llbb()); + next_cx + } + } else { + let llval = v0; + let llbox = bcx.load(llval); + drop_ty(&bcx, llbox, content_ty); + trans_exchange_free_ty(&bcx, llbox, content_ty); + bcx + } + } + ty::TyDynamic(..) => { + // No support in vtable for distinguishing destroying with + // versus without calling Drop::drop. Assert caller is + // okay with always calling the Drop impl, if any. + // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments + assert!(!skip_dtor); + let data_ptr = get_dataptr(&bcx, v0); + let vtable_ptr = bcx.load(get_meta(&bcx, v0)); + let dtor = bcx.load(vtable_ptr); + bcx.call(dtor, &[bcx.pointercast(bcx.load(data_ptr), Type::i8p(bcx.ccx))], None); + bcx + } + ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { + trans_custom_dtor(bcx, t, v0, def.is_union()) + } + ty::TyAdt(def, ..) if def.is_union() => { + bcx + } + _ => { + if bcx.ccx.shared().type_needs_drop(t) { + drop_structural_ty(bcx, v0, t) + } else { + bcx + } + } + }; + bcx.ret_void(); } -fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - v0: ValueRef, - shallow_drop: bool) - -> Block<'blk, 'tcx> +fn trans_custom_dtor<'a, 'tcx>(mut bcx: BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, + v0: ValueRef, + shallow_drop: bool) + -> BlockAndBuilder<'a, 'tcx> { debug!("trans_custom_dtor t: {}", t); let tcx = bcx.tcx(); - let mut bcx = bcx; let def = t.ty_adt_def().unwrap(); @@ -269,23 +294,23 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // // FIXME (#14875) panic-in-drop semantics might be unsupported; we // might well consider changing below to more direct code. - let contents_scope = bcx.fcx.push_custom_cleanup_scope(); - // Issue #23611: schedule cleanup of contents, re-inspecting the // discriminant (if any) in case of variant swap in drop code. 
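The `TyDynamic` arm reads the destructor out of vtable slot 0, and `size_and_align_of_dst` further down reads slots 1 and 2 via `gepi(info, &[1])` and `gepi(info, &[2])`. A rough sketch of the layout those loads assume (field names are illustrative only, not the compiler's):

```rust
// Model of the vtable header the loads above rely on; the real table is just an
// array of pointer-sized words, followed by the trait's method pointers.
#[repr(C)]
struct VtableHeader {
    drop_glue: unsafe fn(*mut ()), // slot 0: called by the TyDynamic arm
    size: usize,                   // slot 1: read by size_and_align_of_dst
    align: usize,                  // slot 2: read by size_and_align_of_dst
}

fn main() {
    // Three pointer-sized header slots before the methods (true on common targets).
    assert_eq!(std::mem::size_of::<VtableHeader>(), 3 * std::mem::size_of::<usize>());
}
```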
- if !shallow_drop { - bcx.fcx.schedule_drop_adt_contents(contents_scope, v0, t); - } + let contents_scope = if !shallow_drop { + bcx.fcx().schedule_drop_adt_contents(v0, t) + } else { + CleanupScope::noop() + }; let (sized_args, unsized_args); - let args: &[ValueRef] = if type_is_sized(tcx, t) { + let args: &[ValueRef] = if bcx.ccx.shared().type_is_sized(t) { sized_args = [v0]; &sized_args } else { // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments unsized_args = [ - Load(bcx, get_dataptr(bcx, v0)), - Load(bcx, get_meta(bcx, v0)) + bcx.load(get_dataptr(&bcx, v0)), + bcx.load(get_meta(&bcx, v0)) ]; &unsized_args }; @@ -294,39 +319,44 @@ fn trans_custom_dtor<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def_id: tcx.lang_items.drop_trait().unwrap(), substs: tcx.mk_substs_trait(t, &[]) }); - let vtbl = match fulfill_obligation(bcx.ccx().shared(), DUMMY_SP, trait_ref) { + let vtbl = match fulfill_obligation(bcx.ccx.shared(), DUMMY_SP, trait_ref) { traits::VtableImpl(data) => data, _ => bug!("dtor for {:?} is not an impl???", t) }; let dtor_did = def.destructor().unwrap(); - bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs) - .call(bcx, DebugLoc::None, args, None).bcx; - - bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) + let callee = Callee::def(bcx.ccx, dtor_did, vtbl.substs); + let fn_ty = callee.direct_fn_type(bcx.ccx, &[]); + let llret; + if let Some(landing_pad) = contents_scope.landing_pad { + let normal_bcx = bcx.fcx().build_new_block("normal-return"); + llret = bcx.invoke(callee.reify(bcx.ccx), args, normal_bcx.llbb(), landing_pad, None); + bcx = normal_bcx; + } else { + llret = bcx.call(callee.reify(bcx.ccx), args, None); + } + fn_ty.apply_attrs_callsite(llret); + contents_scope.trans(&bcx); + bcx } -pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, - t: Ty<'tcx>, info: ValueRef) - -> (ValueRef, ValueRef) { +pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, info: ValueRef) + -> (ValueRef, ValueRef) { debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); - if type_is_sized(bcx.tcx(), t) { - let sizing_type = sizing_type_of(bcx.ccx(), t); - let size = llsize_of_alloc(bcx.ccx(), sizing_type); - let align = align_of(bcx.ccx(), t); + if bcx.ccx.shared().type_is_sized(t) { + let sizing_type = sizing_type_of(bcx.ccx, t); + let size = llsize_of_alloc(bcx.ccx, sizing_type); + let align = align_of(bcx.ccx, t); debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}", t, Value(info), size, align); - let size = C_uint(bcx.ccx(), size); - let align = C_uint(bcx.ccx(), align); + let size = C_uint(bcx.ccx, size); + let align = C_uint(bcx.ccx, align); return (size, align); } - if bcx.is_unreachable() { - let llty = Type::int(bcx.ccx()); - return (C_undef(llty), C_undef(llty)); - } match t.sty { ty::TyAdt(def, substs) => { - let ccx = bcx.ccx(); + let ccx = bcx.ccx; // First get the size of all statically known fields. // Don't use type_of::sizing_type_of because that expects t to be sized, // and it also rounds up to alignment, which we want to avoid, @@ -389,7 +419,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, // // `(size + (align-1)) & -align` - let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64)); + let addend = bcx.sub(align, C_uint(bcx.ccx, 1_u64)); let size = bcx.and(bcx.add(size, addend), bcx.neg(align)); (size, align) @@ -397,7 +427,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, ty::TyDynamic(..) 
=> { // info points to the vtable and the second entry in the vtable is the // dynamic size of the object. - let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to()); + let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to()); let size_ptr = bcx.gepi(info, &[1]); let align_ptr = bcx.gepi(info, &[2]); (bcx.load(size_ptr), bcx.load(align_ptr)) @@ -406,126 +436,40 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>, let unit_ty = t.sequence_element_type(bcx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. - let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty); - let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); - let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty); - (bcx.mul(info, C_uint(bcx.ccx(), unit_size)), - C_uint(bcx.ccx(), unit_align)) + let llunit_ty = sizing_type_of(bcx.ccx, unit_ty); + let unit_align = llalign_of_min(bcx.ccx, llunit_ty); + let unit_size = llsize_of_alloc(bcx.ccx, llunit_ty); + (bcx.mul(info, C_uint(bcx.ccx, unit_size)), + C_uint(bcx.ccx, unit_align)) } _ => bug!("Unexpected unsized type, found {}", t) } } -fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - v0: ValueRef, - g: DropGlueKind<'tcx>) - -> Block<'blk, 'tcx> { - let t = g.ty(); - - let skip_dtor = match g { DropGlueKind::Ty(_) => false, DropGlueKind::TyContents(_) => true }; - // NB: v0 is an *alias* of type t here, not a direct value. - let _icx = push_ctxt("make_drop_glue"); - - // Only drop the value when it ... well, we used to check for - // non-null, (and maybe we need to continue doing so), but we now - // must definitely check for special bit-patterns corresponding to - // the special dtor markings. - - match t.sty { - ty::TyBox(content_ty) => { - // Support for TyBox is built-in and its drop glue is - // special. It may move to library and have Drop impl. As - // a safe-guard, assert TyBox not used with TyContents. - assert!(!skip_dtor); - if !type_is_sized(bcx.tcx(), content_ty) { - let llval = get_dataptr(bcx, v0); - let llbox = Load(bcx, llval); - let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); - // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments - let info = get_meta(bcx, v0); - let info = Load(bcx, info); - let (llsize, llalign) = - size_and_align_of_dst(&bcx.build(), content_ty, info); - - // `Box` does not allocate. - let needs_free = ICmp(bcx, - llvm::IntNE, - llsize, - C_uint(bcx.ccx(), 0u64), - DebugLoc::None); - with_cond(bcx, needs_free, |bcx| { - trans_exchange_free_dyn(bcx, llbox, llsize, llalign, DebugLoc::None) - }) - } else { - let llval = v0; - let llbox = Load(bcx, llval); - let bcx = drop_ty(bcx, llbox, content_ty, DebugLoc::None); - trans_exchange_free_ty(bcx, llbox, content_ty, DebugLoc::None) - } - } - ty::TyDynamic(..) => { - // No support in vtable for distinguishing destroying with - // versus without calling Drop::drop. Assert caller is - // okay with always calling the Drop impl, if any. - // FIXME(#36457) -- we should pass unsized values to drop glue as two arguments - assert!(!skip_dtor); - let data_ptr = get_dataptr(bcx, v0); - let vtable_ptr = Load(bcx, get_meta(bcx, v0)); - let dtor = Load(bcx, vtable_ptr); - Call(bcx, - dtor, - &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))], - DebugLoc::None); - bcx - } - ty::TyAdt(def, ..) if def.dtor_kind().is_present() && !skip_dtor => { - trans_custom_dtor(bcx, t, v0, def.is_union()) - } - ty::TyAdt(def, ..) 
if def.is_union() => { - bcx - } - _ => { - if bcx.fcx.type_needs_drop(t) { - drop_structural_ty(bcx, v0, t) - } else { - bcx - } - } - } -} - // Iterates through the elements of a structural type, dropping them. -fn drop_structural_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - av: ValueRef, - t: Ty<'tcx>) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("drop_structural_ty"); - - fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, - t: Ty<'tcx>, - av: adt::MaybeSizedValue, - variant: &'tcx ty::VariantDef, - substs: &Substs<'tcx>) - -> Block<'blk, 'tcx> { - let _icx = push_ctxt("iter_variant"); +fn drop_structural_ty<'a, 'tcx>(cx: BlockAndBuilder<'a, 'tcx>, + av: ValueRef, + t: Ty<'tcx>) + -> BlockAndBuilder<'a, 'tcx> { + fn iter_variant<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, + t: Ty<'tcx>, + av: adt::MaybeSizedValue, + variant: &'tcx ty::VariantDef, + substs: &Substs<'tcx>) { let tcx = cx.tcx(); - let mut cx = cx; - for (i, field) in variant.fields.iter().enumerate() { let arg = monomorphize::field_ty(tcx, substs, field); - cx = drop_ty(cx, - adt::trans_field_ptr(cx, t, av, Disr::from(variant.disr_val), i), - arg, DebugLoc::None); + let field_ptr = adt::trans_field_ptr(&cx, t, av, Disr::from(variant.disr_val), i); + drop_ty(&cx, field_ptr, arg); } - return cx; } - let value = if type_is_sized(cx.tcx(), t) { + let value = if cx.ccx.shared().type_is_sized(t) { adt::MaybeSizedValue::sized(av) } else { // FIXME(#36457) -- we should pass unsized values as two arguments - let data = Load(cx, get_dataptr(cx, av)); - let info = Load(cx, get_meta(cx, av)); + let data = cx.load(get_dataptr(&cx, av)); + let info = cx.load(get_meta(&cx, av)); adt::MaybeSizedValue::unsized_(data, info) }; @@ -533,67 +477,65 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, match t.sty { ty::TyClosure(def_id, substs) => { for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() { - let llupvar = adt::trans_field_ptr(cx, t, value, Disr(0), i); - cx = drop_ty(cx, llupvar, upvar_ty, DebugLoc::None); + let llupvar = adt::trans_field_ptr(&cx, t, value, Disr(0), i); + drop_ty(&cx, llupvar, upvar_ty); } } ty::TyArray(_, n) => { - let base = get_dataptr(cx, value.value); - let len = C_uint(cx.ccx(), n); + let base = get_dataptr(&cx, value.value); + let len = C_uint(cx.ccx, n); let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::slice_for_each(cx, base, unit_ty, len, - |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + cx = tvec::slice_for_each(&cx, base, unit_ty, len, |bb, vv| drop_ty(bb, vv, unit_ty)); } ty::TySlice(_) | ty::TyStr => { let unit_ty = t.sequence_element_type(cx.tcx()); - cx = tvec::slice_for_each(cx, value.value, unit_ty, value.meta, - |bb, vv| drop_ty(bb, vv, unit_ty, DebugLoc::None)); + cx = tvec::slice_for_each(&cx, value.value, unit_ty, value.meta, + |bb, vv| drop_ty(bb, vv, unit_ty)); } ty::TyTuple(ref args) => { for (i, arg) in args.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, t, value, Disr(0), i); - cx = drop_ty(cx, llfld_a, *arg, DebugLoc::None); + let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr(0), i); + drop_ty(&cx, llfld_a, *arg); } } ty::TyAdt(adt, substs) => match adt.adt_kind() { AdtKind::Struct => { let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None); for (i, &Field(_, field_ty)) in fields.iter().enumerate() { - let llfld_a = adt::trans_field_ptr(cx, t, value, Disr::from(discr), i); + let llfld_a = adt::trans_field_ptr(&cx, t, value, Disr::from(discr), i); - let val = if type_is_sized(cx.tcx(), field_ty) { + let val = if 
cx.ccx.shared().type_is_sized(field_ty) { llfld_a } else { // FIXME(#36457) -- we should pass unsized values as two arguments - let scratch = alloc_ty(cx, field_ty, "__fat_ptr_iter"); - Store(cx, llfld_a, get_dataptr(cx, scratch)); - Store(cx, value.meta, get_meta(cx, scratch)); + let scratch = alloc_ty(&cx, field_ty, "__fat_ptr_iter"); + cx.store(llfld_a, get_dataptr(&cx, scratch)); + cx.store(value.meta, get_meta(&cx, scratch)); scratch }; - cx = drop_ty(cx, val, field_ty, DebugLoc::None); + drop_ty(&cx, val, field_ty); } } AdtKind::Union => { bug!("Union in `glue::drop_structural_ty`"); } AdtKind::Enum => { - let fcx = cx.fcx; - let ccx = fcx.ccx; let n_variants = adt.variants.len(); // NB: we must hit the discriminant first so that structural // comparison know not to proceed when the discriminants differ. - match adt::trans_switch(cx, t, av, false) { + match adt::trans_switch(&cx, t, av, false) { (adt::BranchKind::Single, None) => { if n_variants != 0 { assert!(n_variants == 1); - cx = iter_variant(cx, t, adt::MaybeSizedValue::sized(av), + iter_variant(&cx, t, adt::MaybeSizedValue::sized(av), &adt.variants[0], substs); } } (adt::BranchKind::Switch, Some(lldiscrim_a)) => { - cx = drop_ty(cx, lldiscrim_a, cx.tcx().types.isize, DebugLoc::None); + let tcx = cx.tcx(); + drop_ty(&cx, lldiscrim_a, tcx.types.isize); // Create a fall-through basic block for the "else" case of // the switch instruction we're about to generate. Note that @@ -608,27 +550,23 @@ fn iter_variant<'blk, 'tcx>(cx: Block<'blk, 'tcx>, // from the outer function, and any other use case will only // call this for an already-valid enum in which case the `ret // void` will never be hit. - let ret_void_cx = fcx.new_block("enum-iter-ret-void"); - RetVoid(ret_void_cx, DebugLoc::None); - let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants); - let next_cx = fcx.new_block("enum-iter-next"); + let ret_void_cx = cx.fcx().build_new_block("enum-iter-ret-void"); + ret_void_cx.ret_void(); + let llswitch = cx.switch(lldiscrim_a, ret_void_cx.llbb(), n_variants); + let next_cx = cx.fcx().build_new_block("enum-iter-next"); for variant in &adt.variants { - let variant_cx = fcx.new_block(&format!("enum-iter-variant-{}", - &variant.disr_val - .to_string())); - let case_val = adt::trans_case(cx, t, Disr::from(variant.disr_val)); - AddCase(llswitch, case_val, variant_cx.llbb); - let variant_cx = iter_variant(variant_cx, - t, - value, - variant, - substs); - Br(variant_cx, next_cx.llbb, DebugLoc::None); + let variant_cx_name = format!("enum-iter-variant-{}", + &variant.disr_val.to_string()); + let variant_cx = cx.fcx().build_new_block(&variant_cx_name); + let case_val = adt::trans_case(&cx, t, Disr::from(variant.disr_val)); + variant_cx.add_case(llswitch, case_val, variant_cx.llbb()); + iter_variant(&variant_cx, t, value, variant, substs); + variant_cx.br(next_cx.llbb()); } cx = next_cx; } - _ => ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), + _ => cx.ccx.sess().unimpl("value from adt::trans_switch in drop_structural_ty"), } } }, diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 577ffbad1348ba07673018c6958c0f04bdeeef32..b7116ba1f338baadec8f4abebeabe3e0ea588945 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -10,7 +10,6 @@ #![allow(non_upper_case_globals)] -use arena::TypedArena; use intrinsics::{self, Intrinsic}; use libc; use llvm; @@ -18,9 +17,7 @@ use abi::{Abi, FnType}; use adt; use base::*; -use build::*; use common::*; -use 
debuginfo::DebugLoc; use declare; use glue; use type_of; @@ -33,7 +30,7 @@ use syntax::symbol::Symbol; use rustc::session::Session; -use syntax_pos::{Span, DUMMY_SP}; +use syntax_pos::Span; use std::cmp::Ordering; use std::iter; @@ -79,6 +76,7 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { "roundf32" => "llvm.round.f32", "roundf64" => "llvm.round.f64", "assume" => "llvm.assume", + "abort" => "llvm.trap", _ => return None }; Some(ccx.get_intrinsic(&llvm_name)) @@ -87,19 +85,15 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// add them to librustc_trans/trans/context.rs -pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, - callee_ty: Ty<'tcx>, - fn_ty: &FnType, - llargs: &[ValueRef], - llresult: ValueRef, - call_debug_location: DebugLoc) - -> Result<'blk, 'tcx> { - let fcx = bcx.fcx; - let ccx = fcx.ccx; +pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + callee_ty: Ty<'tcx>, + fn_ty: &FnType, + llargs: &[ValueRef], + llresult: ValueRef, + span: Span) { + let ccx = bcx.ccx; let tcx = bcx.tcx(); - let _icx = push_ctxt("trans_intrinsic_call"); - let (def_id, substs, fty) = match callee_ty.sty { ty::TyFnDef(def_id, substs, ref fty) => (def_id, substs, fty), _ => bug!("expected fn item type, found {}", callee_ty) @@ -110,223 +104,150 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let ret_ty = sig.output(); let name = &*tcx.item_name(def_id).as_str(); - let span = match call_debug_location { - DebugLoc::ScopeAt(_, span) => span, - DebugLoc::None => { - span_bug!(fcx.span.unwrap_or(DUMMY_SP), - "intrinsic `{}` called with missing span", name); - } - }; - - // These are the only intrinsic functions that diverge. 
- if name == "abort" { - let llfn = ccx.get_intrinsic(&("llvm.trap")); - Call(bcx, llfn, &[], call_debug_location); - Unreachable(bcx); - return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); - } else if name == "unreachable" { - Unreachable(bcx); - return Result::new(bcx, C_nil(ccx)); - } - let llret_ty = type_of::type_of(ccx, ret_ty); let simple = get_simple_intrinsic(ccx, name); - let llval = match (simple, name) { - (Some(llfn), _) => { - Call(bcx, llfn, &llargs, call_debug_location) + let llval = match name { + _ if simple.is_some() => { + bcx.call(simple.unwrap(), &llargs, None) } - (_, "likely") => { + "unreachable" => { + return; + }, + "likely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - Call(bcx, expect, &[llargs[0], C_bool(ccx, true)], call_debug_location) + bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None) } - (_, "unlikely") => { + "unlikely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - Call(bcx, expect, &[llargs[0], C_bool(ccx, false)], call_debug_location) + bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) } - (_, "try") => { - bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult, - call_debug_location); + "try" => { + try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult); C_nil(ccx) } - (_, "breakpoint") => { + "breakpoint" => { let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); - Call(bcx, llfn, &[], call_debug_location) + bcx.call(llfn, &[], None) } - (_, "size_of") => { + "size_of" => { let tp_ty = substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) } - (_, "size_of_val") => { + "size_of_val" => { let tp_ty = substs.type_at(0); - if !type_is_sized(tcx, tp_ty) { + if !bcx.ccx.shared().type_is_sized(tp_ty) { let (llsize, _) = - glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llsize } else { let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) } } - (_, "min_align_of") => { + "min_align_of" => { let tp_ty = substs.type_at(0); C_uint(ccx, type_of::align_of(ccx, tp_ty)) } - (_, "min_align_of_val") => { + "min_align_of_val" => { let tp_ty = substs.type_at(0); - if !type_is_sized(tcx, tp_ty) { + if !bcx.ccx.shared().type_is_sized(tp_ty) { let (_, llalign) = - glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign } else { C_uint(ccx, type_of::align_of(ccx, tp_ty)) } } - (_, "pref_align_of") => { + "pref_align_of" => { let tp_ty = substs.type_at(0); let lltp_ty = type_of::type_of(ccx, tp_ty); C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty)) } - (_, "drop_in_place") => { - let tp_ty = substs.type_at(0); - let is_sized = type_is_sized(tcx, tp_ty); - let ptr = if is_sized { - llargs[0] - } else { - // FIXME(#36457) -- we should pass unsized values as two arguments - let scratch = alloc_ty(bcx, tp_ty, "drop"); - call_lifetime_start(bcx, scratch); - Store(bcx, llargs[0], get_dataptr(bcx, scratch)); - Store(bcx, llargs[1], get_meta(bcx, scratch)); - scratch - }; - glue::drop_ty(bcx, ptr, tp_ty, call_debug_location); - if !is_sized { - call_lifetime_end(bcx, ptr); - } - C_nil(ccx) - } - (_, "type_name") => { + "type_name" => { let tp_ty = substs.type_at(0); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); C_str_slice(ccx, ty_name) } - (_, "type_id") => { + "type_id" => { C_u64(ccx, ccx.tcx().type_id_hash(substs.type_at(0))) } - (_, "init") => { - let tp_ty = 
substs.type_at(0); - if !type_is_zero_size(ccx, tp_ty) { - // Just zero out the stack slot. (See comment on base::memzero for explanation) - init_zero_mem(bcx, llresult, tp_ty); + "init" => { + let ty = substs.type_at(0); + if !type_is_zero_size(ccx, ty) { + // Just zero out the stack slot. + // If we store a zero constant, LLVM will drown in vreg allocation for large data + // structures, and the generated code will be awful. (A telltale sign of this is + // large quantities of `mov [byte ptr foo],0` in the generated code.) + memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_uint(ccx, 1usize)); } C_nil(ccx) } // Effectively no-ops - (_, "uninit") | (_, "forget") => { + "uninit" | "forget" => { C_nil(ccx) } - (_, "needs_drop") => { + "needs_drop" => { let tp_ty = substs.type_at(0); - C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty)) + C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty)) } - (_, "offset") => { + "offset" => { let ptr = llargs[0]; let offset = llargs[1]; - InBoundsGEP(bcx, ptr, &[offset]) + bcx.inbounds_gep(ptr, &[offset]) } - (_, "arith_offset") => { + "arith_offset" => { let ptr = llargs[0]; let offset = llargs[1]; - GEP(bcx, ptr, &[offset]) + bcx.gep(ptr, &[offset]) } - (_, "copy_nonoverlapping") => { - copy_intrinsic(bcx, - false, - false, - substs.type_at(0), - llargs[1], - llargs[0], - llargs[2], - call_debug_location) + "copy_nonoverlapping" => { + copy_intrinsic(bcx, false, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) } - (_, "copy") => { - copy_intrinsic(bcx, - true, - false, - substs.type_at(0), - llargs[1], - llargs[0], - llargs[2], - call_debug_location) + "copy" => { + copy_intrinsic(bcx, true, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) } - (_, "write_bytes") => { - memset_intrinsic(bcx, - false, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) + "write_bytes" => { + memset_intrinsic(bcx, false, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } - (_, "volatile_copy_nonoverlapping_memory") => { - copy_intrinsic(bcx, - false, - true, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) + "volatile_copy_nonoverlapping_memory" => { + copy_intrinsic(bcx, false, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } - (_, "volatile_copy_memory") => { - copy_intrinsic(bcx, - true, - true, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) + "volatile_copy_memory" => { + copy_intrinsic(bcx, true, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } - (_, "volatile_set_memory") => { - memset_intrinsic(bcx, - true, - substs.type_at(0), - llargs[0], - llargs[1], - llargs[2], - call_debug_location) + "volatile_set_memory" => { + memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) } - (_, "volatile_load") => { + "volatile_load" => { let tp_ty = substs.type_at(0); let mut ptr = llargs[0]; if let Some(ty) = fn_ty.ret.cast { - ptr = PointerCast(bcx, ptr, ty.ptr_to()); + ptr = bcx.pointercast(ptr, ty.ptr_to()); } - let load = VolatileLoad(bcx, ptr); + let load = bcx.volatile_load(ptr); unsafe { llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty)); } to_immediate(bcx, load, tp_ty) }, - (_, "volatile_store") => { + "volatile_store" => { let tp_ty = substs.type_at(0); - if type_is_fat_ptr(bcx.tcx(), tp_ty) { - VolatileStore(bcx, llargs[1], get_dataptr(bcx, llargs[0])); - VolatileStore(bcx, llargs[2], get_meta(bcx, llargs[0])); + if type_is_fat_ptr(bcx.ccx, tp_ty) { + bcx.volatile_store(llargs[1], 
get_dataptr(bcx, llargs[0])); + bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0])); } else { let val = if fn_ty.args[1].is_indirect() { - Load(bcx, llargs[1]) + bcx.load(llargs[1]) } else { from_immediate(bcx, llargs[1]) }; - let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to()); - let store = VolatileStore(bcx, val, ptr); + let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to()); + let store = bcx.volatile_store(val, ptr); unsafe { llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty)); } @@ -334,49 +255,58 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, C_nil(ccx) }, - (_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") | - (_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") | - (_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") | - (_, "unchecked_div") | (_, "unchecked_rem") => { + "ctlz" | "cttz" | "ctpop" | "bswap" | + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" | + "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "unchecked_div" | "unchecked_rem" => { let sty = &arg_tys[0].sty; match int_type_width_signed(sty, ccx) { Some((width, signed)) => match name { - "ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width), - llargs[0], call_debug_location), - "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width), - llargs[0], call_debug_location), - "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &llargs, call_debug_location), + "ctlz" | "cttz" => { + let y = C_bool(bcx.ccx, false); + let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); + bcx.call(llfn, &[llargs[0], y], None) + } + "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), + &llargs, None), "bswap" => { if width == 8 { llargs[0] // byte swap a u8/i8 is just a no-op } else { - Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &llargs, call_debug_location) + bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), + &llargs, None) } } "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { let intrinsic = format!("llvm.{}{}.with.overflow.i{}", if signed { 's' } else { 'u' }, &name[..3], width); - with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult, - call_debug_location) + let llfn = bcx.ccx.get_intrinsic(&intrinsic); + + // Convert `i1` to a `bool`, and write it to the out parameter + let val = bcx.call(llfn, &[llargs[0], llargs[1]], None); + let result = bcx.extract_value(val, 0); + let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx)); + bcx.store(result, bcx.struct_gep(llresult, 0)); + bcx.store(overflow, bcx.struct_gep(llresult, 1)); + + C_nil(bcx.ccx) }, - "overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location), - "overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location), - "overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location), + "overflowing_add" => bcx.add(llargs[0], llargs[1]), + "overflowing_sub" => bcx.sub(llargs[0], llargs[1]), + "overflowing_mul" => bcx.mul(llargs[0], llargs[1]), "unchecked_div" => if signed { - SDiv(bcx, llargs[0], llargs[1], call_debug_location) + bcx.sdiv(llargs[0], llargs[1]) } else { - UDiv(bcx, llargs[0], llargs[1], call_debug_location) + bcx.udiv(llargs[0], llargs[1]) }, "unchecked_rem" => if signed { - SRem(bcx, llargs[0], llargs[1], call_debug_location) + bcx.srem(llargs[0], llargs[1]) } else { - URem(bcx, llargs[0], llargs[1], call_debug_location) + 
bcx.urem(llargs[0], llargs[1]) }, _ => bug!(), }, @@ -390,17 +320,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, } }, - (_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") | - (_, "frem_fast") => { + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { let sty = &arg_tys[0].sty; match float_type_width(sty) { Some(_width) => match name { - "fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location), - "fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location), - "fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location), - "fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location), - "frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location), + "fadd_fast" => bcx.fadd_fast(llargs[0], llargs[1]), + "fsub_fast" => bcx.fsub_fast(llargs[0], llargs[1]), + "fmul_fast" => bcx.fmul_fast(llargs[0], llargs[1]), + "fdiv_fast" => bcx.fdiv_fast(llargs[0], llargs[1]), + "frem_fast" => bcx.frem_fast(llargs[0], llargs[1]), _ => bug!(), }, None => { @@ -414,7 +343,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, }, - (_, "discriminant_value") => { + "discriminant_value" => { let val_ty = substs.type_at(0); match val_ty.sty { ty::TyAdt(adt, ..) if adt.is_enum() => { @@ -424,17 +353,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, _ => C_null(llret_ty) } } - (_, name) if name.starts_with("simd_") => { + name if name.starts_with("simd_") => { generic_simd_intrinsic(bcx, name, callee_ty, &llargs, ret_ty, llret_ty, - call_debug_location, span) } // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst - (_, name) if name.starts_with("atomic_") => { + name if name.starts_with("atomic_") => { use llvm::AtomicOrdering::*; let split: Vec<&str> = name.split('_').collect(); @@ -464,22 +392,25 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, _ => ccx.sess().fatal("Atomic intrinsic not in correct format"), }; + let invalid_monomorphization = |sty| { + span_invalid_monomorphization_error(tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, sty)); + }; + match split[1] { "cxchg" | "cxchgweak" => { let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; - let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2], - order, failorder, weak); - let result = ExtractValue(bcx, val, 0); - let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); - Store(bcx, result, StructGEP(bcx, llresult, 0)); - Store(bcx, success, StructGEP(bcx, llresult, 1)); + let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, + failorder, weak); + let result = bcx.extract_value(val, 0); + let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx)); + bcx.store(result, bcx.struct_gep(llresult, 0)); + bcx.store(success, bcx.struct_gep(llresult, 1)); } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); } C_nil(ccx) } @@ -487,12 +418,9 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, "load" => { let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { - 
AtomicLoad(bcx, llargs[0], order) + bcx.atomic_load(llargs[0], order) } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); C_nil(ccx) } } @@ -500,23 +428,20 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, "store" => { let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { - AtomicStore(bcx, llargs[1], llargs[0], order); + bcx.atomic_store(llargs[1], llargs[0], order); } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); } C_nil(ccx) } "fence" => { - AtomicFence(bcx, order, llvm::SynchronizationScope::CrossThread); + bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); C_nil(ccx) } "singlethreadfence" => { - AtomicFence(bcx, order, llvm::SynchronizationScope::SingleThread); + bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); C_nil(ccx) } @@ -539,20 +464,16 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, let sty = &substs.type_at(0).sty; if int_type_width_signed(sty, ccx).is_some() { - AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order) + bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order) } else { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + invalid_monomorphization(sty); C_nil(ccx) } } } - } - (..) => { + _ => { let intr = match Intrinsic::find(&name) { Some(intr) => intr, None => bug!("unknown intrinsic '{}'", name), @@ -581,18 +502,15 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, *any_changes_needed |= llvm_elem.is_some(); let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, - any_changes_needed)); + let elem = one(ty_to_type(ccx, t, any_changes_needed)); vec![elem.ptr_to()] } Vector(ref t, ref llvm_elem, length) => { *any_changes_needed |= llvm_elem.is_some(); let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, - any_changes_needed)); - vec![Type::vector(&elem, - length as u64)] + let elem = one(ty_to_type(ccx, t, any_changes_needed)); + vec![Type::vector(&elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() @@ -613,11 +531,11 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, // qux` to be converted into `foo, bar, baz, qux`, integer // arguments to be truncated as needed and pointers to be // cast. - fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - t: &intrinsics::Type, - arg_type: Ty<'tcx>, - llarg: ValueRef) - -> Vec + fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + t: &intrinsics::Type, + arg_type: Ty<'tcx>, + llarg: ValueRef) + -> Vec { match *t { intrinsics::Type::Aggregate(true, ref contents) => { @@ -627,29 +545,27 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. 
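The `cxchg`/`cxchgweak` arm above packs the previous value and a zext'd `i1` success flag into the two fields of `llresult` (and, per the naming scheme, an intrinsic with no ordering suffix means SeqCst). At the library level that pair is roughly what `compare_exchange` surfaces:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let x = AtomicU32::new(1);
    // Success: Ok carries the previous value (the first field stored above).
    assert_eq!(x.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst), Ok(1));
    // Failure: the success flag is false and Err carries the value actually seen.
    assert_eq!(x.compare_exchange(1, 3, Ordering::SeqCst, Ordering::SeqCst), Err(2));
}
```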
- assert!(!bcx.fcx.type_needs_drop(arg_type)); + assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); let arg = adt::MaybeSizedValue::sized(llarg); (0..contents.len()) .map(|i| { - Load(bcx, adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) + bcx.load(adt::trans_field_ptr(bcx, arg_type, arg, Disr(0), i)) }) .collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); - vec![PointerCast(bcx, llarg, - llvm_elem.ptr_to())] + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); + vec![bcx.pointercast(llarg, llvm_elem.ptr_to())] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false)); - vec![BitCast(bcx, llarg, - Type::vector(&llvm_elem, length as u64))] + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); + vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. - vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))] + vec![bcx.trunc(llarg, Type::ix(bcx.ccx, llvm_width as u64))] } _ => vec![llarg], } @@ -686,7 +602,7 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let f = declare::declare_cfn(ccx, name, Type::func(&inputs, &outputs)); - Call(bcx, f, &llargs, call_debug_location) + bcx.call(f, &llargs, None) } }; @@ -696,8 +612,8 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, assert!(!flatten); for i in 0..elems.len() { - let val = ExtractValue(bcx, val, i); - Store(bcx, val, StructGEP(bcx, llresult, i)); + let val = bcx.extract_value(val, i); + bcx.store(val, bcx.struct_gep(llresult, i)); } C_nil(ccx) } @@ -706,11 +622,10 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, } }; - if val_ty(llval) != Type::void(ccx) && - machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { + if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { if let Some(ty) = fn_ty.ret.cast { - let ptr = PointerCast(bcx, llresult, ty.ptr_to()); - let store = Store(bcx, llval, ptr); + let ptr = bcx.pointercast(llresult, ty.ptr_to()); + let store = bcx.store(llval, ptr); unsafe { llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty)); } @@ -718,20 +633,17 @@ fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, store_ty(bcx, llval, llresult, ret_ty); } } - - Result::new(bcx, llresult) } -fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - allow_overlap: bool, - volatile: bool, - tp_ty: Ty<'tcx>, - dst: ValueRef, - src: ValueRef, - count: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let ccx = bcx.ccx(); +fn copy_intrinsic<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + allow_overlap: bool, + volatile: bool, + tp_ty: Ty<'tcx>, + dst: ValueRef, + src: ValueRef, + count: ValueRef) + -> ValueRef { + let ccx = bcx.ccx; let lltp_ty = type_of::type_of(ccx, tp_ty); let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); let size = machine::llsize_of(ccx, lltp_ty); @@ -745,92 +657,49 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size); - let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx)); - let src_ptr = PointerCast(bcx, src, Type::i8p(ccx)); + let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx)); + let src_ptr = bcx.pointercast(src, Type::i8p(ccx)); let llfn = 
ccx.get_intrinsic(&name); - Call(bcx, - llfn, - &[dst_ptr, - src_ptr, - Mul(bcx, size, count, DebugLoc::None), - align, - C_bool(ccx, volatile)], - call_debug_location) + bcx.call(llfn, + &[dst_ptr, + src_ptr, + bcx.mul(size, count), + align, + C_bool(ccx, volatile)], + None) } -fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - volatile: bool, - tp_ty: Ty<'tcx>, - dst: ValueRef, - val: ValueRef, - count: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let ccx = bcx.ccx(); - let lltp_ty = type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32); +fn memset_intrinsic<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + volatile: bool, + ty: Ty<'tcx>, + dst: ValueRef, + val: ValueRef, + count: ValueRef +) -> ValueRef { + let ccx = bcx.ccx; + let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32); + let lltp_ty = type_of::type_of(ccx, ty); let size = machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.int_type()); - - let name = format!("llvm.memset.p0i8.i{}", int_size); - - let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx)); - let llfn = ccx.get_intrinsic(&name); - - Call(bcx, - llfn, - &[dst_ptr, - val, - Mul(bcx, size, count, DebugLoc::None), - align, - C_bool(ccx, volatile)], - call_debug_location) + let dst = bcx.pointercast(dst, Type::i8p(ccx)); + call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile) } -fn count_zeros_intrinsic(bcx: Block, - name: &str, - val: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let y = C_bool(bcx.ccx(), false); - let llfn = bcx.ccx().get_intrinsic(&name); - Call(bcx, llfn, &[val, y], call_debug_location) -} - -fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - name: &str, - a: ValueRef, - b: ValueRef, - out: ValueRef, - call_debug_location: DebugLoc) - -> ValueRef { - let llfn = bcx.ccx().get_intrinsic(&name); - - // Convert `i1` to a `bool`, and write it to the out parameter - let val = Call(bcx, llfn, &[a, b], call_debug_location); - let result = ExtractValue(bcx, val, 0); - let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); - Store(bcx, result, StructGEP(bcx, out, 0)); - Store(bcx, overflow, StructGEP(bcx, out, 1)); - - C_nil(bcx.ccx()) -} - -fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - func: ValueRef, - data: ValueRef, - local_ptr: ValueRef, - dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { +fn try_intrinsic<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + func: ValueRef, + data: ValueRef, + local_ptr: ValueRef, + dest: ValueRef, +) { if bcx.sess().no_landing_pads() { - Call(bcx, func, &[data], dloc); - Store(bcx, C_null(Type::i8p(bcx.ccx())), dest); - bcx + bcx.call(func, &[data], None); + bcx.store(C_null(Type::i8p(&bcx.ccx)), dest); } else if wants_msvc_seh(bcx.sess()) { - trans_msvc_try(bcx, func, data, local_ptr, dest, dloc) + trans_msvc_try(bcx, func, data, local_ptr, dest); } else { - trans_gnu_try(bcx, func, data, local_ptr, dest, dloc) + trans_gnu_try(bcx, func, data, local_ptr, dest); } } @@ -841,26 +710,24 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // instructions are meant to work for all targets, as of the time of this // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. 
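`try_intrinsic` above is the machinery behind `catch_unwind`: it either degenerates to a plain call plus a null store when landing pads are disabled, or routes the unwind through the MSVC/GNU shims that follow. A runnable view from the user side:

```rust
fn main() {
    // catch_unwind is the user-facing form of the "try" intrinsic: call the
    // closure and, if it unwinds, hand back the payload instead of propagating.
    let caught = std::panic::catch_unwind(|| -> i32 { panic!("boom") });
    assert!(caught.is_err());

    let ok = std::panic::catch_unwind(|| 40 + 2);
    assert_eq!(ok.unwrap(), 42);
}
```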
-fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - func: ValueRef, - data: ValueRef, - local_ptr: ValueRef, - dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { - let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { - let ccx = bcx.ccx(); - let dloc = DebugLoc::None; - - SetPersonalityFn(bcx, bcx.fcx.eh_personality()); - - let normal = bcx.fcx.new_block("normal"); - let catchswitch = bcx.fcx.new_block("catchswitch"); - let catchpad = bcx.fcx.new_block("catchpad"); - let caught = bcx.fcx.new_block("caught"); - - let func = llvm::get_param(bcx.fcx.llfn, 0); - let data = llvm::get_param(bcx.fcx.llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); +fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + func: ValueRef, + data: ValueRef, + local_ptr: ValueRef, + dest: ValueRef) { + let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let ccx = bcx.ccx; + + bcx.set_personality_fn(bcx.ccx.eh_personality()); + + let normal = bcx.fcx().build_new_block("normal"); + let catchswitch = bcx.fcx().build_new_block("catchswitch"); + let catchpad = bcx.fcx().build_new_block("catchpad"); + let caught = bcx.fcx().build_new_block("caught"); + + let func = llvm::get_param(bcx.fcx().llfn, 0); + let data = llvm::get_param(bcx.fcx().llfn, 1); + let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); // We're generating an IR snippet that looks like: // @@ -902,37 +769,37 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // // More information can be found in libstd's seh.rs implementation. let i64p = Type::i64(ccx).ptr_to(); - let slot = Alloca(bcx, i64p, "slot"); - Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc); + let slot = bcx.fcx().alloca(i64p, "slot"); + bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), + None); - Ret(normal, C_i32(ccx, 0), dloc); + normal.ret(C_i32(ccx, 0)); - let cs = CatchSwitch(catchswitch, None, None, 1); - AddHandler(catchswitch, cs, catchpad.llbb); + let cs = catchswitch.catch_switch(None, None, 1); + catchswitch.add_handler(cs, catchpad.llbb()); let tcx = ccx.tcx(); let tydesc = match tcx.lang_items.msvc_try_filter() { Some(did) => ::consts::get_static(ccx, did), None => bug!("msvc_try_filter not defined"), }; - let tok = CatchPad(catchpad, cs, &[tydesc, C_i32(ccx, 0), slot]); - let addr = Load(catchpad, slot); - let arg1 = Load(catchpad, addr); + let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]); + let addr = catchpad.load(slot); + let arg1 = catchpad.load(addr); let val1 = C_i32(ccx, 1); - let arg2 = Load(catchpad, InBoundsGEP(catchpad, addr, &[val1])); - let local_ptr = BitCast(catchpad, local_ptr, i64p); - Store(catchpad, arg1, local_ptr); - Store(catchpad, arg2, InBoundsGEP(catchpad, local_ptr, &[val1])); - CatchRet(catchpad, tok, caught.llbb); + let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1])); + let local_ptr = catchpad.bitcast(local_ptr, i64p); + catchpad.store(arg1, local_ptr); + catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1])); + catchpad.catch_ret(tok, caught.llbb()); - Ret(caught, C_i32(ccx, 1), dloc); + caught.ret(C_i32(ccx, 1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). 
- let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); - Store(bcx, ret, dest); - return bcx + let ret = bcx.call(llfn, &[func, data, local_ptr], None); + bcx.store(ret, dest); } // Definition of the standard "try" function for Rust using the GNU-like model @@ -946,15 +813,13 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // function calling it, and that function may already have other personality // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. -fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - func: ValueRef, - data: ValueRef, - local_ptr: ValueRef, - dest: ValueRef, - dloc: DebugLoc) -> Block<'blk, 'tcx> { - let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { - let ccx = bcx.ccx(); - let dloc = DebugLoc::None; +fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + func: ValueRef, + data: ValueRef, + local_ptr: ValueRef, + dest: ValueRef) { + let llfn = get_rust_try_fn(bcx.fcx(), &mut |bcx| { + let ccx = bcx.ccx; // Translates the shims described above: // @@ -973,14 +838,14 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. - let then = bcx.fcx.new_block("then"); - let catch = bcx.fcx.new_block("catch"); + let then = bcx.fcx().build_new_block("then"); + let catch = bcx.fcx().build_new_block("catch"); - let func = llvm::get_param(bcx.fcx.llfn, 0); - let data = llvm::get_param(bcx.fcx.llfn, 1); - let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); - Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc); - Ret(then, C_i32(ccx, 0), dloc); + let func = llvm::get_param(bcx.fcx().llfn, 0); + let data = llvm::get_param(bcx.fcx().llfn, 1); + let local_ptr = llvm::get_param(bcx.fcx().llfn, 2); + bcx.invoke(func, &[data], then.llbb(), catch.llbb(), None); + then.ret(C_i32(ccx, 0)); // Type indicator for the exception being thrown. // @@ -990,18 +855,17 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, // rust_try ignores the selector. let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let vals = LandingPad(catch, lpad_ty, bcx.fcx.eh_personality(), 1); - AddClause(catch, vals, C_null(Type::i8p(ccx))); - let ptr = ExtractValue(catch, vals, 0); - Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to())); - Ret(catch, C_i32(ccx, 1), dloc); + let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn); + catch.add_clause(vals, C_null(Type::i8p(ccx))); + let ptr = catch.extract_value(vals, 0); + catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to())); + catch.ret(C_i32(ccx, 1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc); - Store(bcx, ret, dest); - return bcx; + let ret = bcx.call(llfn, &[func, data, local_ptr], None); + bcx.store(ret, dest); } // Helper function to give a Block to a closure to translate a shim function. 
@@ -1010,11 +874,10 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, name: &str, inputs: Vec>, output: Ty<'tcx>, - trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) + trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false); - let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]); let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy { unsafety: hir::Unsafety::Unsafe, @@ -1022,11 +885,8 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, sig: ty::Binder(sig) })); let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty); - let (fcx, block_arena); - block_arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - trans(fcx.init(true)); - fcx.cleanup(); + let fcx = FunctionContext::new(ccx, llfn); + trans(fcx.get_entry_block()); llfn } @@ -1035,7 +895,7 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, // // This function is only generated once and is then cached. fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, - trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) + trans: &mut for<'b> FnMut(BlockAndBuilder<'b, 'tcx>)) -> ValueRef { let ccx = fcx.ccx; if let Some(llfn) = ccx.rust_try_fn().get() { @@ -1060,16 +920,15 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { span_err!(a, b, E0511, "{}", c); } -fn generic_simd_intrinsic<'blk, 'tcx, 'a> - (bcx: Block<'blk, 'tcx>, - name: &str, - callee_ty: Ty<'tcx>, - llargs: &[ValueRef], - ret_ty: Ty<'tcx>, - llret_ty: Type, - call_debug_location: DebugLoc, - span: Span) -> ValueRef -{ +fn generic_simd_intrinsic<'a, 'tcx>( + bcx: &BlockAndBuilder<'a, 'tcx>, + name: &str, + callee_ty: Ty<'tcx>, + llargs: &[ValueRef], + ret_ty: Ty<'tcx>, + llret_ty: Type, + span: Span +) -> ValueRef { // macros for error handling: macro_rules! emit_error { ($msg: tt) => { @@ -1087,7 +946,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> ($cond: expr, $($fmt: tt)*) => { if !$cond { emit_error!($($fmt)*); - return C_nil(bcx.ccx()) + return C_nil(bcx.ccx) } } } @@ -1138,8 +997,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> llargs[1], in_elem, llret_ty, - cmp_op, - call_debug_location) + cmp_op) } if name.starts_with("simd_shuffle") { @@ -1179,7 +1037,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> arg_idx, total_len); None } - Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)), + Some(idx) => Some(C_i32(bcx.ccx, idx as i32)), } }) .collect(); @@ -1188,20 +1046,20 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a> None => return C_null(llret_ty) }; - return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices)) + return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices)) } if name == "simd_insert" { require!(in_elem == arg_tys[2], "expected inserted type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, arg_tys[2]); - return InsertElement(bcx, llargs[0], llargs[2], llargs[1]) + return bcx.insert_element(llargs[0], llargs[2], llargs[1]) } if name == "simd_extract" { require!(ret_ty == in_elem, "expected return type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, ret_ty); - return ExtractElement(bcx, llargs[0], llargs[1]) + return bcx.extract_element(llargs[0], llargs[1]) } if name == "simd_cast" { @@ -1237,34 +1095,34 @@ enum Style { Float, Int(/* is signed? 
*/ bool), Unsupported } match (in_style, out_style) { (Style::Int(in_is_signed), Style::Int(_)) => { return match in_width.cmp(&out_width) { - Ordering::Greater => Trunc(bcx, llargs[0], llret_ty), + Ordering::Greater => bcx.trunc(llargs[0], llret_ty), Ordering::Equal => llargs[0], Ordering::Less => if in_is_signed { - SExt(bcx, llargs[0], llret_ty) + bcx.sext(llargs[0], llret_ty) } else { - ZExt(bcx, llargs[0], llret_ty) + bcx.zext(llargs[0], llret_ty) } } } (Style::Int(in_is_signed), Style::Float) => { return if in_is_signed { - SIToFP(bcx, llargs[0], llret_ty) + bcx.sitofp(llargs[0], llret_ty) } else { - UIToFP(bcx, llargs[0], llret_ty) + bcx.uitofp(llargs[0], llret_ty) } } (Style::Float, Style::Int(out_is_signed)) => { return if out_is_signed { - FPToSI(bcx, llargs[0], llret_ty) + bcx.fptosi(llargs[0], llret_ty) } else { - FPToUI(bcx, llargs[0], llret_ty) + bcx.fptoui(llargs[0], llret_ty) } } (Style::Float, Style::Float) => { return match in_width.cmp(&out_width) { - Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty), + Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty), Ordering::Equal => llargs[0], - Ordering::Less => FPExt(bcx, llargs[0], llret_ty) + Ordering::Less => bcx.fpext(llargs[0], llret_ty) } } _ => {/* Unsupported. Fallthrough. */} @@ -1275,13 +1133,13 @@ enum Style { Float, Int(/* is signed? */ bool), Unsupported } ret_ty, out_elem); } macro_rules! arith { - ($($name: ident: $($($p: ident),* => $call: expr),*;)*) => { + ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { $( if name == stringify!($name) { match in_elem.sty { $( $(ty::$p(_))|* => { - return $call(bcx, llargs[0], llargs[1], call_debug_location) + return bcx.$call(llargs[0], llargs[1]) } )* _ => {}, @@ -1294,15 +1152,15 @@ enum Style { Float, Int(/* is signed? */ bool), Unsupported } } } arith! { - simd_add: TyUint, TyInt => Add, TyFloat => FAdd; - simd_sub: TyUint, TyInt => Sub, TyFloat => FSub; - simd_mul: TyUint, TyInt => Mul, TyFloat => FMul; - simd_div: TyFloat => FDiv; - simd_shl: TyUint, TyInt => Shl; - simd_shr: TyUint => LShr, TyInt => AShr; - simd_and: TyUint, TyInt => And; - simd_or: TyUint, TyInt => Or; - simd_xor: TyUint, TyInt => Xor; + simd_add: TyUint, TyInt => add, TyFloat => fadd; + simd_sub: TyUint, TyInt => sub, TyFloat => fsub; + simd_mul: TyUint, TyInt => mul, TyFloat => fmul; + simd_div: TyFloat => fdiv; + simd_shl: TyUint, TyInt => shl; + simd_shr: TyUint => lshr, TyInt => ashr; + simd_and: TyUint, TyInt => and; + simd_or: TyUint, TyInt => or; + simd_xor: TyUint, TyInt => xor; } span_bug!(span, "unknown SIMD intrinsic"); } diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index d842827b6feadc70654b9fd3e34f9d3a16ab4c71..2fb0e8c24c540de4200c1c78074b316772837125 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -36,6 +36,7 @@ #![feature(slice_patterns)] #![feature(staged_api)] #![feature(unicode)] +#![feature(conservative_impl_trait)] use rustc::dep_graph::WorkProduct; @@ -95,8 +96,6 @@ pub mod back { mod assert_module_sources; mod attributes; mod base; -mod basic_block; -mod build; mod builder; mod cabi_aarch64; mod cabi_arm; diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index aa9b900fa465396573edae7f1f02801a0de67667..cf50e7be2afb5c1692b3532ec84b0f33d5d1f3ae 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -9,16 +9,11 @@ // except according to those terms. 
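The `simd_cast` arm above selects one LLVM conversion per combination of input/output element kind and width. Restated as a plain-Rust decision function (the string names refer to the builder methods used above; this is a sketch, not compiler code):

    use std::cmp::Ordering;

    #[derive(Clone, Copy)]
    enum Elem {
        Int { width: u64, signed: bool },
        Float { width: u64 },
    }

    // Which conversion `simd_cast` emits for a given (input, output) element pair.
    fn simd_cast_op(from: Elem, to: Elem) -> &'static str {
        match (from, to) {
            (Elem::Int { width: a, signed }, Elem::Int { width: b, .. }) => match a.cmp(&b) {
                Ordering::Greater => "trunc",
                Ordering::Equal => "no-op (same width)",
                Ordering::Less => if signed { "sext" } else { "zext" },
            },
            (Elem::Int { signed, .. }, Elem::Float { .. }) =>
                if signed { "sitofp" } else { "uitofp" },
            (Elem::Float { .. }, Elem::Int { signed, .. }) =>
                if signed { "fptosi" } else { "fptoui" },
            (Elem::Float { width: a }, Elem::Float { width: b }) => match a.cmp(&b) {
                Ordering::Greater => "fptrunc",
                Ordering::Equal => "no-op (same width)",
                Ordering::Less => "fpext",
            },
        }
    }

    fn main() {
        let i32e = Elem::Int { width: 32, signed: true };
        let f64e = Elem::Float { width: 64 };
        println!("{}", simd_cast_op(i32e, f64e)); // sitofp
        println!("{}", simd_cast_op(f64e, i32e)); // fptosi
    }

The `arith!` invocation in the same stretch does the analogous dispatch for binary ops, e.g. `simd_add` lowers to `add` for integer elements and `fadd` for floating-point ones.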
use attributes; -use arena::TypedArena; use llvm::{ValueRef, get_params}; use rustc::traits; -use abi::FnType; -use base::*; -use build::*; -use callee::Callee; +use callee::{Callee, CalleeData}; use common::*; use consts; -use debuginfo::DebugLoc; use declare; use glue; use machine; @@ -32,15 +27,15 @@ const VTABLE_OFFSET: usize = 3; /// Extracts a method from a trait object's vtable, at the specified index. -pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, - llvtable: ValueRef, - vtable_index: usize) - -> ValueRef { +pub fn get_virtual_method<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + llvtable: ValueRef, + vtable_index: usize) + -> ValueRef { // Load the data pointer from the object. debug!("get_virtual_method(vtable_index={}, llvtable={:?})", vtable_index, Value(llvtable)); - Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET])) + bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET])) } /// Generate a shim function that allows an object type like `SomeTrait` to @@ -67,36 +62,47 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, callee: Callee<'tcx>) -> ValueRef { - let _icx = push_ctxt("trans_object_shim"); - let tcx = ccx.tcx(); - debug!("trans_object_shim({:?})", callee); - let (sig, abi, function_name) = match callee.ty.sty { - ty::TyFnDef(def_id, substs, f) => { + let function_name = match callee.ty.sty { + ty::TyFnDef(def_id, substs, _) => { let instance = Instance::new(def_id, substs); - (&f.sig, f.abi, instance.symbol_name(ccx.shared())) + instance.symbol_name(ccx.shared()) } _ => bug!() }; - let sig = tcx.erase_late_bound_regions_and_normalize(sig); - let fn_ty = FnType::new(ccx, abi, &sig, &[]); - let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty); attributes::set_frame_pointer_elimination(ccx, llfn); - let (block_arena, fcx): (TypedArena<_>, FunctionContext); - block_arena = TypedArena::new(); - fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena); - let mut bcx = fcx.init(false); - - let dest = fcx.llretslotptr.get(); - let llargs = get_params(fcx.llfn); - bcx = callee.call(bcx, DebugLoc::None, - &llargs[fcx.fn_ty.ret.is_indirect() as usize..], dest).bcx; - - fcx.finish(bcx, DebugLoc::None); + let fcx = FunctionContext::new(ccx, llfn); + let bcx = fcx.get_entry_block(); + + let mut llargs = get_params(fcx.llfn); + let fn_ret = callee.ty.fn_ret(); + let fn_ty = callee.direct_fn_type(ccx, &[]); + + let fn_ptr = match callee.data { + CalleeData::Virtual(idx) => { + let fn_ptr = get_virtual_method(&bcx, + llargs.remove(fn_ty.ret.is_indirect() as usize + 1), idx); + let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); + bcx.pointercast(fn_ptr, llty) + }, + _ => bug!("trans_object_shim called with non-virtual callee"), + }; + let llret = bcx.call(fn_ptr, &llargs, None); + fn_ty.apply_attrs_callsite(llret); + + if fn_ret.0.is_never() { + bcx.unreachable(); + } else { + if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() { + bcx.ret_void(); + } else { + bcx.ret(llret); + } + } llfn } @@ -115,7 +121,6 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, -> ValueRef { let tcx = ccx.tcx(); - let _icx = push_ctxt("meth::get_vtable"); debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index e4d0533ec878422bc60b6e52f0ba2d1a6471afae..8df24da7135887e4ecdea99dfcf1467ce2d7bc5b 100644 --- a/src/librustc_trans/mir/analyze.rs +++ 
b/src/librustc_trans/mir/analyze.rs @@ -16,31 +16,30 @@ use rustc::mir::{self, Location, TerminatorKind}; use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; -use common::{self, Block, BlockAndBuilder}; -use glue; +use common; +use super::MirContext; use super::rvalue; -pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>, - mir: &mir::Mir<'tcx>) -> BitVector { - let bcx = bcx.build(); - let mut analyzer = LocalAnalyzer::new(mir, &bcx); +pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { + let mir = mircx.mir; + let mut analyzer = LocalAnalyzer::new(mircx); analyzer.visit_mir(mir); for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { - let ty = bcx.monomorphize(&ty); + let ty = mircx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); if ty.is_scalar() || ty.is_unique() || ty.is_region_ptr() || ty.is_simd() || - common::type_is_zero_size(bcx.ccx(), ty) + common::type_is_zero_size(mircx.ccx, ty) { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. - assert!(common::type_is_immediate(bcx.ccx(), ty) || - common::type_is_fat_ptr(bcx.tcx(), ty)); - } else if common::type_is_imm_pair(bcx.ccx(), ty) { + assert!(common::type_is_immediate(mircx.ccx, ty) || + common::type_is_fat_ptr(mircx.ccx, ty)); + } else if common::type_is_imm_pair(mircx.ccx, ty) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -56,22 +55,18 @@ pub fn lvalue_locals<'bcx, 'tcx>(bcx: Block<'bcx,'tcx>, analyzer.lvalue_locals } -struct LocalAnalyzer<'mir, 'bcx: 'mir, 'tcx: 'bcx> { - mir: &'mir mir::Mir<'tcx>, - bcx: &'mir BlockAndBuilder<'bcx, 'tcx>, +struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> { + cx: &'mir MirContext<'a, 'tcx>, lvalue_locals: BitVector, seen_assigned: BitVector } -impl<'mir, 'bcx, 'tcx> LocalAnalyzer<'mir, 'bcx, 'tcx> { - fn new(mir: &'mir mir::Mir<'tcx>, - bcx: &'mir BlockAndBuilder<'bcx, 'tcx>) - -> LocalAnalyzer<'mir, 'bcx, 'tcx> { +impl<'mir, 'a, 'tcx> LocalAnalyzer<'mir, 'a, 'tcx> { + fn new(mircx: &'mir MirContext<'a, 'tcx>) -> LocalAnalyzer<'mir, 'a, 'tcx> { LocalAnalyzer { - mir: mir, - bcx: bcx, - lvalue_locals: BitVector::new(mir.local_decls.len()), - seen_assigned: BitVector::new(mir.local_decls.len()) + cx: mircx, + lvalue_locals: BitVector::new(mircx.mir.local_decls.len()), + seen_assigned: BitVector::new(mircx.mir.local_decls.len()) } } @@ -87,7 +82,7 @@ fn mark_assigned(&mut self, local: mir::Local) { } } -impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> { +impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { fn visit_assign(&mut self, block: mir::BasicBlock, lvalue: &mir::Lvalue<'tcx>, @@ -97,7 +92,7 @@ fn visit_assign(&mut self, if let mir::Lvalue::Local(index) = *lvalue { self.mark_assigned(index); - if !rvalue::rvalue_creates_operand(self.mir, self.bcx, rvalue) { + if !rvalue::rvalue_creates_operand(rvalue) { self.mark_as_lvalue(index); } } else { @@ -117,7 +112,7 @@ fn visit_terminator_kind(&mut self, literal: mir::Literal::Item { def_id, .. }, .. }), ref args, .. - } if Some(def_id) == self.bcx.tcx().lang_items.box_free_fn() => { + } if Some(def_id) == self.cx.ccx.tcx().lang_items.box_free_fn() => { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. 
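A concrete, non-compiler illustration of the decision `lvalue_locals` makes above: scalar locals that are only ever consumed can stay as SSA "operands", while aggregates and anything whose address is observed get an alloca and are marked as lvalue locals.

    // Hypothetical function annotated with the choice the analysis would make.
    fn example(n: i32) -> i32 {
        let a = n + 1;   // scalar, only consumed          -> operand, no alloca
        let b = [n; 4];  // aggregate, address taken below -> lvalue local, alloca
        let r = &b;      // the borrow itself is a thin pointer -> operand
        a + r[0]
    }

    fn main() {
        println!("{}", example(2)); // 5
    }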
@@ -140,10 +135,10 @@ fn visit_lvalue(&mut self, // Allow uses of projections of immediate pair fields. if let mir::Lvalue::Projection(ref proj) = *lvalue { if let mir::Lvalue::Local(_) = proj.base { - let ty = proj.base.ty(self.mir, self.bcx.tcx()); + let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx()); - let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx())); - if common::type_is_imm_pair(self.bcx.ccx(), ty) { + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); + if common::type_is_imm_pair(self.cx.ccx, ty) { if let mir::ProjectionElem::Field(..) = proj.elem { if let LvalueContext::Consume = context { return; @@ -171,11 +166,11 @@ fn visit_lvalue(&mut self, } LvalueContext::Drop => { - let ty = lvalue.ty(self.mir, self.bcx.tcx()); - let ty = self.bcx.monomorphize(&ty.to_ty(self.bcx.tcx())); + let ty = lvalue.ty(self.cx.mir, self.cx.ccx.tcx()); + let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); // Only need the lvalue if we're actually dropping it. - if glue::type_needs_drop(self.bcx.tcx(), ty) { + if self.cx.ccx.shared().type_needs_drop(ty) { self.mark_as_lvalue(index); } } @@ -200,10 +195,7 @@ pub enum CleanupKind { Internal { funclet: mir::BasicBlock } } -pub fn cleanup_kinds<'bcx,'tcx>(_bcx: Block<'bcx,'tcx>, - mir: &mir::Mir<'tcx>) - -> IndexVec -{ +pub fn cleanup_kinds<'a, 'tcx>(mir: &mir::Mir<'tcx>) -> IndexVec { fn discover_masters<'tcx>(result: &mut IndexVec, mir: &mir::Mir<'tcx>) { for (bb, data) in mir.basic_blocks().iter_enumerated() { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index fe087bc495121c3d7460cf5a26c6c5690c67123e..5ad52b3d252cb1c29089560b4f651257f850c7d5 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -8,20 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, ValueRef}; +use llvm::{self, ValueRef, BasicBlockRef}; use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err}; use rustc::middle::lang_items; use rustc::ty::{self, layout}; use rustc::mir; use abi::{Abi, FnType, ArgType}; use adt; -use base; -use build; +use base::{self, Lifetime}; use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual}; -use common::{self, Block, BlockAndBuilder, LandingPad}; +use common::{self, BlockAndBuilder, Funclet}; use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; -use debuginfo::DebugLoc; use Disr; use machine::{llalign_of_min, llbitsize_of_real}; use meth; @@ -29,6 +27,7 @@ use glue; use type_::Type; +use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::fx::FxHashMap; use syntax::symbol::Symbol; @@ -39,21 +38,27 @@ use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -use std::cell::Ref as CellRef; +use std::ptr; -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { - pub fn trans_block(&mut self, bb: mir::BasicBlock) { - let mut bcx = self.bcx(bb); - let data = &CellRef::clone(&self.mir)[bb]; +impl<'a, 'tcx> MirContext<'a, 'tcx> { + pub fn trans_block(&mut self, bb: mir::BasicBlock, + funclets: &IndexVec>) { + let mut bcx = self.build_block(bb); + let data = &self.mir[bb]; debug!("trans_block({:?}={:?})", bb, data); + let funclet = match self.cleanup_kinds[bb] { + CleanupKind::Internal { funclet } => funclets[funclet].as_ref(), + _ => funclets[bb].as_ref(), + }; + // Create the cleanup bundle, if needed. 
- let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad()); - let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle()); + let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); + let cleanup_bundle = funclet.map(|l| l.bundle()); let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| { - let lltarget = this.blocks[bb].llbb; + let lltarget = this.blocks[bb]; if let Some(cp) = cleanup_pad { match this.cleanup_kinds[bb] { CleanupKind::Funclet => { @@ -70,7 +75,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { }; let llblock = |this: &mut Self, target: mir::BasicBlock| { - let lltarget = this.blocks[target].llbb; + let lltarget = this.blocks[target]; if let Some(cp) = cleanup_pad { match this.cleanup_kinds[target] { @@ -79,8 +84,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.fcx.new_block(name).build(); - trampoline.set_personality_fn(this.fcx.eh_personality()); + let trampoline = this.fcx.build_new_block(name); trampoline.cleanup_ret(cp, Some(lltarget)); trampoline.llbb() } @@ -93,7 +97,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { (this.cleanup_kinds[bb], this.cleanup_kinds[target]) { // jump *into* cleanup - need a landing pad if GNU - this.landing_pad_to(target).llbb + this.landing_pad_to(target) } else { lltarget } @@ -108,23 +112,22 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { debug!("trans_block: terminator: {:?}", terminator); let span = terminator.source_info.span; - let debug_loc = self.debug_loc(terminator.source_info); - debug_loc.apply_to_bcx(&bcx); - debug_loc.apply(bcx.fcx()); + self.set_debug_loc(&bcx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { if let Some(cleanup_pad) = cleanup_pad { bcx.cleanup_ret(cleanup_pad, None); } else { - let llpersonality = bcx.fcx().eh_personality(); - bcx.set_personality_fn(llpersonality); - let ps = self.get_personality_slot(&bcx); let lp = bcx.load(ps); - bcx.with_block(|bcx| { - base::call_lifetime_end(bcx, ps); - base::trans_unwind_resume(bcx, lp); - }); + Lifetime::End.call(&bcx, ps); + if !bcx.sess().target.target.options.custom_unwind_resume { + bcx.resume(lp); + } else { + let exc_ptr = bcx.extract_value(lp, 0); + bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], cleanup_bundle); + bcx.unreachable(); + } } } @@ -143,9 +146,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => { let discr_lvalue = self.trans_lvalue(&bcx, discr); let ty = discr_lvalue.ty.to_ty(bcx.tcx()); - let discr = bcx.with_block(|bcx| - adt::trans_get_discr(bcx, ty, discr_lvalue.llval, None, true) - ); + let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true); let mut bb_hist = FxHashMap(); for target in targets { @@ -162,16 +163,15 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { // We're generating an exhaustive switch, so the else branch // can't be hit. 
Branching to an unreachable instruction // lets LLVM know this - _ => (None, self.unreachable_block().llbb) + _ => (None, self.unreachable_block()) }; let switch = bcx.switch(discr, default_blk, targets.len()); assert_eq!(adt_def.variants.len(), targets.len()); for (adt_variant, &target) in adt_def.variants.iter().zip(targets) { if default_bb != Some(target) { let llbb = llblock(self, target); - let llval = bcx.with_block(|bcx| adt::trans_case( - bcx, ty, Disr::from(adt_variant.disr_val))); - build::AddCase(switch, llval, llbb) + let llval = adt::trans_case(&bcx, ty, Disr::from(adt_variant.disr_val)); + bcx.add_case(switch, llval, llbb) } } } @@ -179,17 +179,17 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { let (otherwise, targets) = targets.split_last().unwrap(); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); - let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty)); + let discr = base::to_immediate(&bcx, discr, switch_ty); let switch = bcx.switch(discr, llblock(self, *otherwise), values.len()); for (value, target) in values.iter().zip(targets) { - let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty); + let val = Const::from_constval(bcx.ccx, value.clone(), switch_ty); let llbb = llblock(self, *target); - build::AddCase(switch, val.llval, llbb) + bcx.add_case(switch, val.llval, llbb) } } mir::TerminatorKind::Return => { - let ret = bcx.fcx().fn_ty.ret; + let ret = self.fn_ty.ret; if ret.is_ignore() || ret.is_indirect() { bcx.ret_void(); return; @@ -208,14 +208,14 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret"); + let llscratch = bcx.fcx().alloca(ret.original_ty, "ret"); self.store_operand(&bcx, llscratch, op); llscratch } Ref(llval) => llval }; let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to())); - let llalign = llalign_of_min(bcx.ccx(), ret.ty); + let llalign = llalign_of_min(bcx.ccx, ret.ty); unsafe { llvm::LLVMSetAlignment(load, llalign); } @@ -233,21 +233,21 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { mir::TerminatorKind::Drop { ref location, target, unwind } => { let ty = location.ty(&self.mir, bcx.tcx()).to_ty(bcx.tcx()); - let ty = bcx.monomorphize(&ty); + let ty = self.monomorphize(&ty); // Double check for necessity to drop - if !glue::type_needs_drop(bcx.tcx(), ty) { + if !bcx.ccx.shared().type_needs_drop(ty) { funclet_br(self, bcx, target); return; } let lvalue = self.trans_lvalue(&bcx, location); - let drop_fn = glue::get_drop_glue(bcx.ccx(), ty); - let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty); - let is_sized = common::type_is_sized(bcx.tcx(), ty); + let drop_fn = glue::get_drop_glue(bcx.ccx, ty); + let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty); + let is_sized = bcx.ccx.shared().type_is_sized(ty); let llvalue = if is_sized { if drop_ty != ty { - bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to()) + bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()) } else { lvalue.llval } @@ -259,18 +259,16 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { // but I am shooting for a quick fix to #35546 // here that can be cleanly backported to beta, so // I want to avoid touching all of trans. 
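The scratch alloca that follows (and its copy in the `drop_in_place` arm further down) packs an unsized value's `(data, meta)` pair back into a single memory slot, because the current drop glue still expects a thin pointer to such a pair, as the nearby FIXME explains. A rough sketch of that slot; the field names and the `*mut u8` meta type are illustrative, not the compiler's:

    // Conceptual contents of the "drop" scratch alloca for an unsized value.
    #[repr(C)]
    struct DropScratch {
        data: *mut u8, // written via get_dataptr: pointer to the value itself
        meta: *mut u8, // written via get_meta: slice length or vtable pointer
    }

The drop glue is then invoked with a pointer to this pair, while the rest of MIR trans passes `data` and `meta` as two separate values.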
- bcx.with_block(|bcx| { - let scratch = base::alloc_ty(bcx, ty, "drop"); - base::call_lifetime_start(bcx, scratch); - build::Store(bcx, lvalue.llval, base::get_dataptr(bcx, scratch)); - build::Store(bcx, lvalue.llextra, base::get_meta(bcx, scratch)); - scratch - }) + let scratch = base::alloc_ty(&bcx, ty, "drop"); + Lifetime::Start.call(&bcx, scratch); + bcx.store(lvalue.llval, base::get_dataptr(&bcx, scratch)); + bcx.store(lvalue.llextra, base::get_meta(&bcx, scratch)); + scratch }; if let Some(unwind) = unwind { bcx.invoke(drop_fn, &[llvalue], - self.blocks[target].llbb, + self.blocks[target], llblock(self, unwind), cleanup_bundle); } else { @@ -290,7 +288,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { // NOTE: Unlike binops, negation doesn't have its own // checked operation, just a comparison with the minimum // value, so we have to check for the assert message. - if !bcx.ccx().check_overflow() { + if !bcx.ccx.check_overflow() { use rustc_const_math::ConstMathErr::Overflow; use rustc_const_math::Op::Neg; @@ -306,27 +304,27 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { } // Pass the condition through llvm.expect for branch hinting. - let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); - let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx(), expected)], None); + let expect = bcx.ccx.get_intrinsic(&"llvm.expect.i1"); + let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx, expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); - let panic_block = self.fcx.new_block("panic"); + let panic_block = self.fcx.build_new_block("panic"); if expected { - bcx.cond_br(cond, lltarget, panic_block.llbb); + bcx.cond_br(cond, lltarget, panic_block.llbb()); } else { - bcx.cond_br(cond, panic_block.llbb, lltarget); + bcx.cond_br(cond, panic_block.llbb(), lltarget); } // After this point, bcx is the block for the call to panic. - bcx = panic_block.build(); - debug_loc.apply_to_bcx(&bcx); + bcx = panic_block; + self.set_debug_loc(&bcx, terminator.source_info); // Get the location information. let loc = bcx.sess().codemap().lookup_char_pos(span.lo); let filename = Symbol::intern(&loc.file.name).as_str(); - let filename = C_str_slice(bcx.ccx(), filename); - let line = C_u32(bcx.ccx(), loc.line as u32); + let filename = C_str_slice(bcx.ccx, filename); + let line = C_u32(bcx.ccx, loc.line as u32); // Put together the arguments to the panic entry point. 
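The filename/line constants built just above are interned into small static tuples whose address is handed to the panic entry points. Roughly, with Rust-level field types standing in for the LLVM constants (the shape is read off the `C_struct` calls that follow, not stated by the patch):

    // Shape of the statics built for the panic calls (illustrative types).
    struct BoundsCheckLoc {
        file: &'static str, // C_str_slice(filename)
        line: u32,          // C_u32(loc.line)
    }

    struct MathErrLoc {
        msg: &'static str,  // the ConstMathErr description
        file: &'static str,
        line: u32,
    }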
let (lang_item, args, const_err) = match *msg { @@ -343,9 +341,9 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { }) }); - let file_line = C_struct(bcx.ccx(), &[filename, line], false); - let align = llalign_of_min(bcx.ccx(), common::val_ty(file_line)); - let file_line = consts::addr_of(bcx.ccx(), + let file_line = C_struct(bcx.ccx, &[filename, line], false); + let align = llalign_of_min(bcx.ccx, common::val_ty(file_line)); + let file_line = consts::addr_of(bcx.ccx, file_line, align, "panic_bounds_check_loc"); @@ -355,12 +353,12 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { } mir::AssertMessage::Math(ref err) => { let msg_str = Symbol::intern(err.description()).as_str(); - let msg_str = C_str_slice(bcx.ccx(), msg_str); - let msg_file_line = C_struct(bcx.ccx(), + let msg_str = C_str_slice(bcx.ccx, msg_str); + let msg_file_line = C_struct(bcx.ccx, &[msg_str, filename, line], false); - let align = llalign_of_min(bcx.ccx(), common::val_ty(msg_file_line)); - let msg_file_line = consts::addr_of(bcx.ccx(), + let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line)); + let msg_file_line = consts::addr_of(bcx.ccx, msg_file_line, align, "panic_loc"); @@ -384,15 +382,15 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { // Obtain the panic entry point. let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item); - let callee = Callee::def(bcx.ccx(), def_id, - bcx.ccx().empty_substs_for_def_id(def_id)); - let llfn = callee.reify(bcx.ccx()); + let callee = Callee::def(bcx.ccx, def_id, + bcx.ccx.empty_substs_for_def_id(def_id)); + let llfn = callee.reify(bcx.ccx); // Translate the actual panic invoke/call. if let Some(unwind) = cleanup { bcx.invoke(llfn, &args, - self.unreachable_block().llbb, + self.unreachable_block(), llblock(self, unwind), cleanup_bundle); } else { @@ -411,7 +409,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { let (mut callee, abi, sig) = match callee.ty.sty { ty::TyFnDef(def_id, substs, f) => { - (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig) + (Callee::def(bcx.ccx, def_id, substs), f.abi, &f.sig) } ty::TyFnPtr(f) => { (Callee { @@ -443,6 +441,65 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { return; } + // FIXME: This should proxy to the drop glue in the future when the ABI matches; + // most of the below code was copied from the match arm for TerminatorKind::Drop. + if intrinsic == Some("drop_in_place") { + let &(_, target) = destination.as_ref().unwrap(); + let ty = if let ty::TyFnDef(_, substs, _) = callee.ty.sty { + substs.type_at(0) + } else { + bug!("Unexpected ty: {}", callee.ty); + }; + + // Double check for necessity to drop + if !bcx.ccx.shared().type_needs_drop(ty) { + funclet_br(self, bcx, target); + return; + } + + let ptr = self.trans_operand(&bcx, &args[0]); + let (llval, llextra) = match ptr.val { + Immediate(llptr) => (llptr, ptr::null_mut()), + Pair(llptr, llextra) => (llptr, llextra), + Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty) + }; + + let drop_fn = glue::get_drop_glue(bcx.ccx, ty); + let drop_ty = glue::get_drop_glue_type(bcx.ccx.shared(), ty); + let is_sized = bcx.ccx.shared().type_is_sized(ty); + let llvalue = if is_sized { + if drop_ty != ty { + bcx.pointercast(llval, type_of::type_of(bcx.ccx, drop_ty).ptr_to()) + } else { + llval + } + } else { + // FIXME(#36457) Currently drop glue takes sized + // values as a `*(data, meta)`, but elsewhere in + // MIR we pass `(data, meta)` as two separate + // arguments. 
It would be better to fix drop glue, + // but I am shooting for a quick fix to #35546 + // here that can be cleanly backported to beta, so + // I want to avoid touching all of trans. + let scratch = base::alloc_ty(&bcx, ty, "drop"); + Lifetime::Start.call(&bcx, scratch); + bcx.store(llval, base::get_dataptr(&bcx, scratch)); + bcx.store(llextra, base::get_meta(&bcx, scratch)); + scratch + }; + if let Some(unwind) = *cleanup { + bcx.invoke(drop_fn, + &[llvalue], + self.blocks[target], + llblock(self, unwind), + cleanup_bundle); + } else { + bcx.call(drop_fn, &[llvalue], cleanup_bundle); + funclet_br(self, bcx, target); + } + return; + } + if intrinsic == Some("transmute") { let &(ref dest, target) = destination.as_ref().unwrap(); self.with_lvalue_ref(&bcx, dest, |this, dest| { @@ -456,9 +513,9 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { let extra_args = &args[sig.inputs().len()..]; let extra_args = extra_args.iter().map(|op_arg| { let op_ty = op_arg.ty(&self.mir, bcx.tcx()); - bcx.monomorphize(&op_ty) + self.monomorphize(&op_ty) }).collect::>(); - let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args); + let fn_ty = callee.direct_fn_type(bcx.ccx, &extra_args); // The arguments we'll be passing. Plus one to account for outptr, if used. let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; @@ -519,7 +576,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { let fn_ptr = match callee.data { NamedTupleConstructor(_) => { // FIXME translate this like mir::Rvalue::Aggregate. - callee.reify(bcx.ccx()) + callee.reify(bcx.ccx) } Intrinsic => { use intrinsic::trans_intrinsic_call; @@ -537,10 +594,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { bug!("Cannot use direct operand with an intrinsic call") }; - bcx.with_block(|bcx| { - trans_intrinsic_call(bcx, callee.ty, &fn_ty, - &llargs, dest, debug_loc); - }); + trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest, + terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { // Make a fake operand for store_return @@ -554,8 +609,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { if let Some((_, target)) = *destination { funclet_br(self, bcx, target); } else { - // trans_intrinsic_call already used Unreachable. - // bcx.unreachable(); + bcx.unreachable(); } return; @@ -573,15 +627,15 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { }; let invokeret = bcx.invoke(fn_ptr, &llargs, - ret_bcx.llbb, + ret_bcx, llblock(self, cleanup), cleanup_bundle); fn_ty.apply_attrs_callsite(invokeret); - if destination.is_some() { - let ret_bcx = ret_bcx.build(); + if let Some((_, target)) = *destination { + let ret_bcx = self.build_block(target); ret_bcx.at_start(|ret_bcx| { - debug_loc.apply_to_bcx(ret_bcx); + self.set_debug_loc(&ret_bcx, terminator.source_info); let op = OperandRef { val: Immediate(invokeret), ty: sig.output(), @@ -608,7 +662,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) { } fn trans_argument(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, op: OperandRef<'tcx>, llargs: &mut Vec, fn_ty: &FnType, @@ -616,14 +670,12 @@ fn trans_argument(&mut self, callee: &mut CalleeData) { if let Pair(a, b) = op.val { // Treat the values in a fat pointer separately. 
- if common::type_is_fat_ptr(bcx.tcx(), op.ty) { + if common::type_is_fat_ptr(bcx.ccx, op.ty) { let (ptr, meta) = (a, b); if *next_idx == 0 { if let Virtual(idx) = *callee { - let llfn = bcx.with_block(|bcx| { - meth::get_virtual_method(bcx, meta, idx) - }); - let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to(); + let llfn = meth::get_virtual_method(bcx, meta, idx); + let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); *callee = Fn(bcx.pointercast(llfn, llty)); } } @@ -655,7 +707,7 @@ fn trans_argument(&mut self, let (mut llval, by_ref) = match op.val { Immediate(_) | Pair(..) => { if arg.is_indirect() || arg.cast.is_some() { - let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg"); + let llscratch = bcx.fcx().alloca(arg.original_ty, "arg"); self.store_operand(bcx, llscratch, op); (llscratch, true) } else { @@ -667,13 +719,13 @@ fn trans_argument(&mut self, if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. - if arg.original_ty == Type::i1(bcx.ccx()) { + if arg.original_ty == Type::i1(bcx.ccx) { // We store bools as i8 so we need to truncate to i1. llval = bcx.load_range_assert(llval, 0, 2, llvm::False); llval = bcx.trunc(llval, arg.original_ty); } else if let Some(ty) = arg.cast { llval = bcx.load(bcx.pointercast(llval, ty.ptr_to())); - let llalign = llalign_of_min(bcx.ccx(), arg.ty); + let llalign = llalign_of_min(bcx.ccx, arg.ty); unsafe { llvm::LLVMSetAlignment(llval, llalign); } @@ -686,7 +738,7 @@ fn trans_argument(&mut self, } fn trans_arguments_untupled(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, operand: &mir::Operand<'tcx>, llargs: &mut Vec, fn_ty: &FnType, @@ -705,9 +757,9 @@ fn trans_arguments_untupled(&mut self, Ref(llval) => { let base = adt::MaybeSizedValue::sized(llval); for (n, &ty) in arg_types.iter().enumerate() { - let ptr = adt::trans_field_ptr_builder(bcx, tuple.ty, base, Disr(0), n); - let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { - let (lldata, llextra) = base::load_fat_ptr_builder(bcx, ptr, ty); + let ptr = adt::trans_field_ptr(bcx, tuple.ty, base, Disr(0), n); + let val = if common::type_is_fat_ptr(bcx.ccx, ty) { + let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty); Pair(lldata, llextra) } else { // trans_argument will load this if it needs to @@ -722,7 +774,7 @@ fn trans_arguments_untupled(&mut self, } Immediate(llval) => { - let l = bcx.ccx().layout_of(tuple.ty); + let l = bcx.ccx.layout_of(tuple.ty); let v = if let layout::Univariant { ref variant, .. 
} = *l { variant } else { @@ -731,8 +783,8 @@ fn trans_arguments_untupled(&mut self, for (n, &ty) in arg_types.iter().enumerate() { let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize); // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx())); + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx)); } // If the tuple is immediate, the elements are as well let op = OperandRef { @@ -747,8 +799,8 @@ fn trans_arguments_untupled(&mut self, for (n, &ty) in arg_types.iter().enumerate() { let mut elem = elems[n]; // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx())); + if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { + elem = bcx.trunc(elem, Type::i1(bcx.ccx)); } // Pair is always made up of immediates let op = OperandRef { @@ -762,91 +814,61 @@ fn trans_arguments_untupled(&mut self, } - fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef { - let ccx = bcx.ccx(); + fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> ValueRef { + let ccx = bcx.ccx; if let Some(slot) = self.llpersonalityslot { slot } else { let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - bcx.with_block(|bcx| { - let slot = base::alloca(bcx, llretty, "personalityslot"); - self.llpersonalityslot = Some(slot); - base::call_lifetime_start(bcx, slot); - slot - }) + let slot = bcx.fcx().alloca(llretty, "personalityslot"); + self.llpersonalityslot = Some(slot); + Lifetime::Start.call(bcx, slot); + slot } } /// Return the landingpad wrapper around the given basic block /// /// No-op in MSVC SEH scheme. - fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx> - { + fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> BasicBlockRef { if let Some(block) = self.landing_pads[target_bb] { return block; } - if base::wants_msvc_seh(self.fcx.ccx.sess()) { + if base::wants_msvc_seh(self.ccx.sess()) { return self.blocks[target_bb]; } - let target = self.bcx(target_bb); + let target = self.build_block(target_bb); - let block = self.fcx.new_block("cleanup"); - self.landing_pads[target_bb] = Some(block); + let bcx = self.fcx.build_new_block("cleanup"); + self.landing_pads[target_bb] = Some(bcx.llbb()); - let bcx = block.build(); - let ccx = bcx.ccx(); - let llpersonality = self.fcx.eh_personality(); + let ccx = bcx.ccx; + let llpersonality = self.ccx.eh_personality(); let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn); bcx.set_cleanup(llretval); let slot = self.get_personality_slot(&bcx); bcx.store(llretval, slot); bcx.br(target.llbb()); - block + bcx.llbb() } - pub fn init_cpad(&mut self, bb: mir::BasicBlock) { - let bcx = self.bcx(bb); - let data = &self.mir[bb]; - debug!("init_cpad({:?})", data); - - match self.cleanup_kinds[bb] { - CleanupKind::NotCleanup => { - bcx.set_lpad(None) - } - _ if !base::wants_msvc_seh(bcx.sess()) => { - bcx.set_lpad(Some(LandingPad::gnu())) - } - CleanupKind::Internal { funclet } => { - // FIXME: is this needed? 
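On the GNU path, the landing pad created in `landing_pad_to` above yields the usual pointer/selector pair, which is spilled into the personality slot and reloaded by the `Resume` arm earlier in this file. What that slot holds, sketched with Rust-level fields (the layout mirrors the `{ i8*, i32 }` struct built above; the field names are mine):

    // Contents of the personality slot on the GNU landing-pad path.
    #[repr(C)]
    struct LandingPadValue {
        exception_ptr: *mut u8, // extract_value(.., 0); forwarded when resuming the unwind
        selector: i32,          // type selector; these cleanups never inspect it
    }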
- bcx.set_personality_fn(self.fcx.eh_personality()); - bcx.set_lpad_ref(self.bcx(funclet).lpad()); - } - CleanupKind::Funclet => { - bcx.set_personality_fn(self.fcx.eh_personality()); - DebugLoc::None.apply_to_bcx(&bcx); - let cleanup_pad = bcx.cleanup_pad(None, &[]); - bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad))); - } - }; - } - - fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> { + fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { - let bl = self.fcx.new_block("unreachable"); - bl.build().unreachable(); - self.unreachable_block = Some(bl); - bl + let bl = self.fcx.build_new_block("unreachable"); + bl.unreachable(); + self.unreachable_block = Some(bl.llbb()); + bl.llbb() }) } - fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> { - self.blocks[bb].build() + pub fn build_block(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'a, 'tcx> { + BlockAndBuilder::new(self.blocks[bb], self.fcx) } - fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, + fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { // If the return is ignored, we can just return a do-nothing ReturnDest @@ -863,18 +885,14 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, return if fn_ret_ty.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. - let tmp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, ret_ty, "tmp_ret") - }); + let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); llargs.push(tmp); ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result. so we create a temporary alloca for the // result - let tmp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, ret_ty, "tmp_ret") - }); + let tmp = base::alloc_ty(bcx, ret_ty, "tmp_ret"); ReturnDest::IndirectOperand(tmp, index) } else { ReturnDest::DirectOperand(index) @@ -895,27 +913,27 @@ fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, } } - fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, + fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) { let mut val = self.trans_operand(bcx, src); if let ty::TyFnDef(def_id, substs, _) = val.ty.sty { - let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx())); - let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype); + let llouttype = type_of::type_of(bcx.ccx, dst.ty.to_ty(bcx.tcx())); + let out_type_size = llbitsize_of_real(bcx.ccx, llouttype); if out_type_size != 0 { // FIXME #19925 Remove this hack after a release cycle. - let f = Callee::def(bcx.ccx(), def_id, substs); + let f = Callee::def(bcx.ccx, def_id, substs); let ty = match f.ty.sty { ty::TyFnDef(.., f) => bcx.tcx().mk_fn_ptr(f), _ => f.ty }; val = OperandRef { - val: Immediate(f.reify(bcx.ccx())), + val: Immediate(f.reify(bcx.ccx)), ty: ty }; } } - let llty = type_of::type_of(bcx.ccx(), val.ty); + let llty = type_of::type_of(bcx.ccx, val.ty); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); self.store_operand(bcx, cast_ptr, val); } @@ -923,7 +941,7 @@ fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, // Stores the return value of a function call into it's final location. 
fn store_return(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, dest: ReturnDest, ret_ty: ArgType, op: OperandRef<'tcx>) { @@ -939,9 +957,7 @@ fn store_return(&mut self, DirectOperand(index) => { // If there is a cast, we have to store and reload. let op = if ret_ty.cast.is_some() { - let tmp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, op.ty, "tmp_ret") - }); + let tmp = base::alloc_ty(bcx, op.ty, "tmp_ret"); ret_ty.store(bcx, op.immediate(), tmp); self.trans_load(bcx, tmp, op.ty) } else { diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index bca81fa36458f4726c2041d696148e1ed811e6fe..08f68f8d49c78a8465ff7c5ec56d239a10e0b729 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -25,7 +25,7 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use {abi, adt, base, Disr, machine}; use callee::Callee; -use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty, type_is_sized}; +use common::{self, BlockAndBuilder, CrateContext, const_get_elt, val_ty}; use common::{C_array, C_bool, C_bytes, C_floating_f64, C_integral}; use common::{C_null, C_struct, C_str_slice, C_undef, C_uint}; use common::{const_to_opt_int, const_to_opt_uint}; @@ -401,7 +401,7 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) .projection_ty(tcx, &projection.elem); let base = tr_base.to_const(span); let projected_ty = self.monomorphize(&projected_ty).to_ty(tcx); - let is_sized = common::type_is_sized(tcx, projected_ty); + let is_sized = self.ccx.shared().type_is_sized(projected_ty); let (projected, llextra) = match projection.elem { mir::ProjectionElem::Deref => { @@ -598,11 +598,11 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, mir::CastKind::Unsize => { // unsize targets other than to a fat pointer currently // can't be in constants. - assert!(common::type_is_fat_ptr(tcx, cast_ty)); + assert!(common::type_is_fat_ptr(self.ccx, cast_ty)); let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference) .expect("consts: unsizing got non-pointer type").ty; - let (base, old_info) = if !common::type_is_sized(tcx, pointee_ty) { + let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) { // Normally, the source is a thin pointer and we are // adding extra info to make a fat pointer. The exception // is when we are upcasting an existing object fat pointer @@ -685,9 +685,9 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, mir::CastKind::Misc => { // Casts from a fat-ptr. 
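For orientation, the fat-pointer casts the constant evaluator handles below behave like their runtime counterparts: casting a fat pointer to a thin pointer keeps only the data word, while a fat-to-fat cast carries both components across. A tiny surface-Rust illustration (not compiler code):

    fn main() {
        let xs: [u8; 4] = [1, 2, 3, 4];
        let fat: *const [u8] = &xs[..]; // (data, length) pair
        let thin = fat as *const u8;    // fat -> thin: metadata dropped
        let refat = fat as *mut [u8];   // fat -> fat: both halves preserved
        println!("{:?} {:?}", thin, refat);
    }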
let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty); let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty); - if common::type_is_fat_ptr(tcx, operand.ty) { + if common::type_is_fat_ptr(self.ccx, operand.ty) { let (data_ptr, meta_ptr) = operand.get_fat_ptr(); - if common::type_is_fat_ptr(tcx, cast_ty) { + if common::type_is_fat_ptr(self.ccx, cast_ty) { let ll_cft = ll_cast_ty.field_types(); let ll_fft = ll_from_ty.field_types(); let data_cast = consts::ptrcast(data_ptr, ll_cft[0]); @@ -716,7 +716,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, let base = match tr_lvalue.base { Base::Value(llval) => { // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug) - let align = if type_is_sized(self.ccx.tcx(), ty) { + let align = if self.ccx.shared().type_is_sized(ty) { type_of::align_of(self.ccx, ty) } else { self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign @@ -731,7 +731,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, Base::Static(llval) => llval }; - let ptr = if common::type_is_sized(tcx, ty) { + let ptr = if self.ccx.shared().type_is_sized(ty) { base } else { C_struct(self.ccx, &[base, tr_lvalue.llextra], false) @@ -945,40 +945,39 @@ pub fn const_scalar_checked_binop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_constant(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, constant: &mir::Constant<'tcx>) -> Const<'tcx> { debug!("trans_constant({:?})", constant); - let ty = bcx.monomorphize(&constant.ty); + let ty = self.monomorphize(&constant.ty); let result = match constant.literal.clone() { mir::Literal::Item { def_id, substs } => { // Shortcut for zero-sized types, including function item // types, which would not work with MirConstContext. - if common::type_is_zero_size(bcx.ccx(), ty) { - let llty = type_of::type_of(bcx.ccx(), ty); + if common::type_is_zero_size(bcx.ccx, ty) { + let llty = type_of::type_of(bcx.ccx, ty); return Const::new(C_null(llty), ty); } - let substs = bcx.monomorphize(&substs); + let substs = self.monomorphize(&substs); let instance = Instance::new(def_id, substs); - MirConstContext::trans_def(bcx.ccx(), instance, IndexVec::new()) + MirConstContext::trans_def(bcx.ccx, instance, IndexVec::new()) } mir::Literal::Promoted { index } => { let mir = &self.mir.promoted[index]; - MirConstContext::new(bcx.ccx(), mir, bcx.fcx().param_substs, - IndexVec::new()).trans() + MirConstContext::new(bcx.ccx, mir, self.param_substs, IndexVec::new()).trans() } mir::Literal::Value { value } => { - Ok(Const::from_constval(bcx.ccx(), value, ty)) + Ok(Const::from_constval(bcx.ccx, value, ty)) } }; let result = result.unwrap_or_else(|_| { // We've errored, so we don't have to produce working code. 
- let llty = type_of::type_of(bcx.ccx(), ty); + let llty = type_of::type_of(bcx.ccx, ty); Const::new(C_undef(llty), ty) }); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index d28c466e230bab664971e4702f71acf6eba0a9e2..0cd7f007c5df92dda45b67fee48d100b808ac524 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -44,13 +44,13 @@ pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty } } - pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, + pub fn alloca<'a>(bcx: &BlockAndBuilder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> { assert!(!ty.has_erasable_regions()); - let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name)); + let lltemp = base::alloc_ty(bcx, ty, name); LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) } @@ -67,14 +67,14 @@ pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { } } -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_lvalue(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> LvalueRef<'tcx> { debug!("trans_lvalue(lvalue={:?})", lvalue); - let ccx = bcx.ccx(); + let ccx = bcx.ccx; let tcx = bcx.tcx(); if let mir::Lvalue::Local(index) = *lvalue { @@ -103,7 +103,7 @@ pub fn trans_lvalue(&mut self, let ptr = self.trans_consume(bcx, base); let projected_ty = LvalueTy::from_ty(ptr.ty) .projection_ty(tcx, &mir::ProjectionElem::Deref); - let projected_ty = bcx.monomorphize(&projected_ty); + let projected_ty = self.monomorphize(&projected_ty); let (llptr, llextra) = match ptr.val { OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()), OperandValue::Pair(llptr, llextra) => (llptr, llextra), @@ -118,14 +118,14 @@ pub fn trans_lvalue(&mut self, mir::Lvalue::Projection(ref projection) => { let tr_base = self.trans_lvalue(bcx, &projection.base); let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); - let projected_ty = bcx.monomorphize(&projected_ty); + let projected_ty = self.monomorphize(&projected_ty); let project_index = |llindex| { let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty { // Slices already point to the array element type. 
bcx.inbounds_gep(tr_base.llval, &[llindex]) } else { - let zero = common::C_uint(bcx.ccx(), 0u64); + let zero = common::C_uint(bcx.ccx, 0u64); bcx.inbounds_gep(tr_base.llval, &[zero, llindex]) }; element @@ -140,14 +140,14 @@ pub fn trans_lvalue(&mut self, LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v, }; let discr = discr as u64; - let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx)); + let is_sized = self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)); let base = if is_sized { adt::MaybeSizedValue::sized(tr_base.llval) } else { adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) }; - let llprojected = adt::trans_field_ptr_builder(bcx, base_ty, base, - Disr(discr), field.index()); + let llprojected = adt::trans_field_ptr(bcx, base_ty, base, Disr(discr), + field.index()); let llextra = if is_sized { ptr::null_mut() } else { @@ -162,19 +162,19 @@ pub fn trans_lvalue(&mut self, mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = C_uint(bcx.ccx(), offset); + let lloffset = C_uint(bcx.ccx, offset); (project_index(lloffset), ptr::null_mut()) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = C_uint(bcx.ccx(), offset); - let lllen = tr_base.len(bcx.ccx()); + let lloffset = C_uint(bcx.ccx, offset); + let lllen = tr_base.len(bcx.ccx); let llindex = bcx.sub(lllen, lloffset); (project_index(llindex), ptr::null_mut()) } mir::ProjectionElem::Subslice { from, to } => { - let llindex = C_uint(bcx.ccx(), from); + let llindex = C_uint(bcx.ccx, from); let llbase = project_index(llindex); let base_ty = tr_base.ty.to_ty(bcx.tcx()); @@ -183,14 +183,14 @@ pub fn trans_lvalue(&mut self, // must cast the lvalue pointer type to the new // array type (*[%_; new_len]). let base_ty = self.monomorphized_lvalue_ty(lvalue); - let llbasety = type_of::type_of(bcx.ccx(), base_ty).ptr_to(); + let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to(); let llbase = bcx.pointercast(llbase, llbasety); (llbase, ptr::null_mut()) } ty::TySlice(..) => { assert!(tr_base.llextra != ptr::null_mut()); let lllen = bcx.sub(tr_base.llextra, - C_uint(bcx.ccx(), from+to)); + C_uint(bcx.ccx, from+to)); (llbase, lllen) } _ => bug!("unexpected type {:?} in Subslice", base_ty) @@ -214,7 +214,7 @@ pub fn trans_lvalue(&mut self, // Perform an action using the given Lvalue. // If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot // is created first, then used as an operand to update the Lvalue. - pub fn with_lvalue_ref(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, + pub fn with_lvalue_ref(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, f: F) -> U where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U { @@ -235,9 +235,9 @@ pub fn with_lvalue_ref(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, // See comments in LocalRef::new_operand as to why // we always have Some in a ZST LocalRef::Operand. let ty = self.monomorphized_lvalue_ty(lvalue); - if common::type_is_zero_size(bcx.ccx(), ty) { + if common::type_is_zero_size(bcx.ccx, ty) { // Pass an undef pointer as no stores can actually occur. - let llptr = C_undef(type_of(bcx.ccx(), ty).ptr_to()); + let llptr = C_undef(type_of(bcx.ccx, ty).ptr_to()); f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty))) } else { bug!("Lvalue local already set"); @@ -255,13 +255,13 @@ pub fn with_lvalue_ref(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>, /// /// nmatsakis: is this still necessary? Not sure. 
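The `Subslice { from, to }` arm above trims `from` elements from the front and `to` from the back, so for slices the new length stored next to the adjusted base pointer is the old length (carried in `llextra`) minus `from + to`. As a one-line worked example:

    // Length math used for ProjectionElem::Subslice on slices.
    fn subslice_len(len: u64, from: u64, to: u64) -> u64 {
        len - (from + to) // e.g. len = 10, from = 2, to = 3 -> 5 remaining elements
    }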
fn prepare_index(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { - let ccx = bcx.ccx(); - let index_size = machine::llbitsize_of_real(bcx.ccx(), common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx(), ccx.int_type()); + let ccx = bcx.ccx; + let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); + let int_size = machine::llbitsize_of_real(bcx.ccx, ccx.int_type()); if index_size < int_size { bcx.zext(llindex, ccx.int_type()) } else if index_size > int_size { @@ -272,8 +272,8 @@ fn prepare_index(&mut self, } pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { - let tcx = self.fcx.ccx.tcx(); + let tcx = self.ccx.tcx(); let lvalue_ty = lvalue.ty(&self.mir, tcx); - self.fcx.monomorphize(&lvalue_ty.to_ty(tcx)) + self.monomorphize(&lvalue_ty.to_ty(tcx)) } } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 94dc9a5fdb48946b308d3f29086044b1ec7bb0e1..7a50e5cbe8c79a4d8bef09bd7fc76e718e8e4321 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -9,40 +9,50 @@ // except according to those terms. use libc::c_uint; -use llvm::{self, ValueRef}; +use llvm::{self, ValueRef, BasicBlockRef}; +use llvm::debuginfo::DIScope; use rustc::ty::{self, layout}; -use rustc::mir; +use rustc::mir::{self, Mir}; use rustc::mir::tcx::LvalueTy; +use rustc::ty::subst::Substs; +use rustc::infer::TransNormalize; +use rustc::ty::TypeFoldable; use session::config::FullDebugInfo; use base; -use common::{self, Block, BlockAndBuilder, CrateContext, FunctionContext, C_null}; -use debuginfo::{self, declare_local, DebugLoc, VariableAccess, VariableKind, FunctionDebugContext}; +use common::{self, BlockAndBuilder, CrateContext, FunctionContext, C_null, Funclet}; +use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; +use monomorphize::{self, Instance}; +use abi::FnType; use type_of; -use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos}; +use syntax_pos::{DUMMY_SP, NO_EXPANSION, COMMAND_LINE_EXPN, BytePos, Span}; use syntax::symbol::keywords; +use syntax::abi::Abi; -use std::cell::Ref; use std::iter; -use basic_block::BasicBlock; - use rustc_data_structures::bitvec::BitVector; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; pub use self::constant::trans_static_initializer; +use self::analyze::CleanupKind; use self::lvalue::{LvalueRef}; use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for translating MIR. -pub struct MirContext<'bcx, 'tcx:'bcx> { - mir: Ref<'tcx, mir::Mir<'tcx>>, +pub struct MirContext<'a, 'tcx:'a> { + mir: &'a mir::Mir<'tcx>, + + debug_context: debuginfo::FunctionDebugContext, + + fcx: &'a common::FunctionContext<'a, 'tcx>, - /// Function context - fcx: &'bcx common::FunctionContext<'bcx, 'tcx>, + ccx: &'a CrateContext<'a, 'tcx>, + + fn_ty: FnType, /// When unwinding is initiated, we have to store this personality /// value somewhere so that we can load it and re-use it in the @@ -54,17 +64,17 @@ pub struct MirContext<'bcx, 'tcx:'bcx> { llpersonalityslot: Option, /// A `Block` for each MIR `BasicBlock` - blocks: IndexVec>, + blocks: IndexVec, /// The funclet status of each basic block cleanup_kinds: IndexVec, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. 
- landing_pads: IndexVec>>, + landing_pads: IndexVec>, /// Cached unreachable block - unreachable_block: Option>, + unreachable_block: Option, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `LvalueRef` representing an alloca, but not always: @@ -85,18 +95,28 @@ pub struct MirContext<'bcx, 'tcx:'bcx> { /// Debug information for MIR scopes. scopes: IndexVec, + + /// If this function is being monomorphized, this contains the type substitutions used. + param_substs: &'tcx Substs<'tcx>, } -impl<'blk, 'tcx> MirContext<'blk, 'tcx> { - pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc { +impl<'a, 'tcx> MirContext<'a, 'tcx> { + pub fn monomorphize(&self, value: &T) -> T + where T: TransNormalize<'tcx> { + monomorphize::apply_param_substs(self.ccx.shared(), self.param_substs, value) + } + + pub fn set_debug_loc(&mut self, bcx: &BlockAndBuilder, source_info: mir::SourceInfo) { + let (scope, span) = self.debug_loc(source_info); + debuginfo::set_source_location(&self.debug_context, bcx, scope, span); + } + + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (DIScope, Span) { // Bail out if debug info emission is not enabled. - match self.fcx.debug_context { + match self.debug_context { FunctionDebugContext::DebugInfoDisabled | FunctionDebugContext::FunctionWithoutDebugInfo => { - // Can't return DebugLoc::None here because intrinsic::trans_intrinsic_call() - // relies on debug location to obtain span of the call site. - return DebugLoc::ScopeAt(self.scopes[source_info.scope].scope_metadata, - source_info.span); + return (self.scopes[source_info.scope].scope_metadata, source_info.span); } FunctionDebugContext::RegularContext(_) =>{} } @@ -106,13 +126,12 @@ pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc { // (unless the crate is being compiled with `-Z debug-macros`). if source_info.span.expn_id == NO_EXPANSION || source_info.span.expn_id == COMMAND_LINE_EXPN || - self.fcx.ccx.sess().opts.debugging_opts.debug_macros { + self.ccx.sess().opts.debugging_opts.debug_macros { - let scope_metadata = self.scope_metadata_for_loc(source_info.scope, - source_info.span.lo); - DebugLoc::ScopeAt(scope_metadata, source_info.span) + let scope = self.scope_metadata_for_loc(source_info.scope, source_info.span.lo); + (scope, source_info.span) } else { - let cm = self.fcx.ccx.sess().codemap(); + let cm = self.ccx.sess().codemap(); // Walk up the macro expansion chain until we reach a non-expanded span. 
let mut span = source_info.span; while span.expn_id != NO_EXPANSION && span.expn_id != COMMAND_LINE_EXPN { @@ -123,9 +142,9 @@ pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> DebugLoc { break; } } - let scope_metadata = self.scope_metadata_for_loc(source_info.scope, span.lo); + let scope = self.scope_metadata_for_loc(source_info.scope, span.lo); // Use span of the outermost call site, while keeping the original lexical scope - DebugLoc::ScopeAt(scope_metadata, span) + (scope, span) } } @@ -138,10 +157,8 @@ fn scope_metadata_for_loc(&self, scope_id: mir::VisibilityScope, pos: BytePos) let scope_metadata = self.scopes[scope_id].scope_metadata; if pos < self.scopes[scope_id].file_start_pos || pos >= self.scopes[scope_id].file_end_pos { - let cm = self.fcx.ccx.sess().codemap(); - debuginfo::extend_scope_to_file(self.fcx.ccx, - scope_metadata, - &cm.lookup_char_pos(pos).file) + let cm = self.ccx.sess().codemap(); + debuginfo::extend_scope_to_file(self.ccx, scope_metadata, &cm.lookup_char_pos(pos).file) } else { scope_metadata } @@ -154,7 +171,7 @@ enum LocalRef<'tcx> { } impl<'tcx> LocalRef<'tcx> { - fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>, + fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>, ty: ty::Ty<'tcx>) -> LocalRef<'tcx> { if common::type_is_zero_size(ccx, ty) { // Zero-size temporaries aren't always initialized, which @@ -180,19 +197,22 @@ fn new_operand<'bcx>(ccx: &CrateContext<'bcx, 'tcx>, /////////////////////////////////////////////////////////////////////////// -pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { - let bcx = fcx.init(true).build(); - let mir = bcx.mir(); +pub fn trans_mir<'a, 'tcx: 'a>( + fcx: &'a FunctionContext<'a, 'tcx>, + fn_ty: FnType, + mir: &'a Mir<'tcx>, + instance: Instance<'tcx>, + sig: &ty::FnSig<'tcx>, + abi: Abi, +) { + let debug_context = + debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir); + let bcx = fcx.get_entry_block(); - // Analyze the temps to determine which must be lvalues - // FIXME - let (lvalue_locals, cleanup_kinds) = bcx.with_block(|bcx| { - (analyze::lvalue_locals(bcx, &mir), - analyze::cleanup_kinds(bcx, &mir)) - }); + let cleanup_kinds = analyze::cleanup_kinds(&mir); // Allocate a `Block` for every basic block - let block_bcxs: IndexVec> = + let block_bcxs: IndexVec = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK { fcx.new_block("start") @@ -202,11 +222,13 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { }).collect(); // Compute debuginfo scopes from MIR scopes. 
- let scopes = debuginfo::create_mir_scopes(fcx); + let scopes = debuginfo::create_mir_scopes(fcx, mir, &debug_context); let mut mircx = MirContext { - mir: Ref::clone(&mir), + mir: mir, fcx: fcx, + fn_ty: fn_ty, + ccx: fcx.ccx, llpersonalityslot: None, blocks: block_bcxs, unreachable_block: None, @@ -214,15 +236,22 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { landing_pads: IndexVec::from_elem(None, mir.basic_blocks()), scopes: scopes, locals: IndexVec::new(), + debug_context: debug_context, + param_substs: { + assert!(!instance.substs.needs_infer()); + instance.substs + }, }; + let lvalue_locals = analyze::lvalue_locals(&mircx); + // Allocate variable and temp allocas mircx.locals = { - let args = arg_local_refs(&bcx, &mir, &mircx.scopes, &lvalue_locals); + let args = arg_local_refs(&bcx, &mircx, &mircx.scopes, &lvalue_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let ty = bcx.monomorphize(&decl.ty); + let ty = mircx.monomorphize(&decl.ty); if let Some(name) = decl.name { // User variable @@ -232,27 +261,21 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { if !lvalue_locals.contains(local.index()) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bcx.ccx(), ty); + return LocalRef::new_operand(bcx.ccx, ty); } debug!("alloc: {:?} ({}) -> lvalue", local, name); let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); if dbg { - let dbg_loc = mircx.debug_loc(source_info); - if let DebugLoc::ScopeAt(scope, span) = dbg_loc { - bcx.with_block(|bcx| { - declare_local(bcx, name, ty, scope, - VariableAccess::DirectVariable { alloca: lvalue.llval }, - VariableKind::LocalVariable, span); - }); - } else { - panic!("Unexpected"); - } + let (scope, span) = mircx.debug_loc(source_info); + declare_local(&bcx, &mircx.debug_context, name, ty, scope, + VariableAccess::DirectVariable { alloca: lvalue.llval }, + VariableKind::LocalVariable, span); } LocalRef::Lvalue(lvalue) } else { // Temporary or return pointer - if local == mir::RETURN_POINTER && fcx.fn_ty.ret.is_indirect() { + if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return pointer) -> lvalue", local); let llretptr = llvm::get_param(fcx.llfn, 0); LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty))) @@ -264,7 +287,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { // alloca in advance. Instead we wait until we see the // definition and update the operand there. debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bcx.ccx(), ty) + LocalRef::new_operand(bcx.ccx, ty) } } }; @@ -278,57 +301,61 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) { // Branch to the START block let start_bcx = mircx.blocks[mir::START_BLOCK]; - bcx.br(start_bcx.llbb); + bcx.br(start_bcx); // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. 
- debuginfo::start_emitting_source_locations(fcx); - - let mut visited = BitVector::new(mir.basic_blocks().len()); + debuginfo::start_emitting_source_locations(&mircx.debug_context); + + let funclets: IndexVec> = + mircx.cleanup_kinds.iter_enumerated().map(|(bb, cleanup_kind)| { + if let CleanupKind::Funclet = *cleanup_kind { + let bcx = mircx.build_block(bb); + bcx.set_personality_fn(mircx.ccx.eh_personality()); + if base::wants_msvc_seh(fcx.ccx.sess()) { + return Some(Funclet::new(bcx.cleanup_pad(None, &[]))); + } + } - let mut rpo = traversal::reverse_postorder(&mir); + None + }).collect(); - // Prepare each block for translation. - for (bb, _) in rpo.by_ref() { - mircx.init_cpad(bb); - } - rpo.reset(); + let rpo = traversal::reverse_postorder(&mir); + let mut visited = BitVector::new(mir.basic_blocks().len()); // Translate the body of each block using reverse postorder for (bb, _) in rpo { visited.insert(bb.index()); - mircx.trans_block(bb); + mircx.trans_block(bb, &funclets); } // Remove blocks that haven't been visited, or have no // predecessors. for bb in mir.basic_blocks().indices() { - let block = mircx.blocks[bb]; - let block = BasicBlock(block.llbb); // Unreachable block if !visited.contains(bb.index()) { debug!("trans_mir: block {:?} was not visited", bb); - block.delete(); + unsafe { + llvm::LLVMDeleteBasicBlock(mircx.blocks[bb]); + } } } - - DebugLoc::None.apply(fcx); - fcx.cleanup(); } /// Produce, for each argument, a `ValueRef` pointing at the /// argument's value. As arguments are lvalues, these are always /// indirect. -fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, - mir: &mir::Mir<'tcx>, - scopes: &IndexVec, - lvalue_locals: &BitVector) - -> Vec> { +fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, + mircx: &MirContext<'a, 'tcx>, + scopes: &IndexVec, + lvalue_locals: &BitVector) + -> Vec> { + let mir = mircx.mir; let fcx = bcx.fcx(); let tcx = bcx.tcx(); let mut idx = 0; - let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize; + let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. let arg_scope = scopes[mir::ARGUMENT_VISIBILITY_SCOPE]; @@ -340,7 +367,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, mir.args_iter().enumerate().map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; - let arg_ty = bcx.monomorphize(&arg_decl.ty); + let arg_ty = mircx.monomorphize(&arg_decl.ty); if Some(local) == mir.spread_arg { // This argument (e.g. the last argument in the "rust-call" ABI) @@ -353,22 +380,18 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, _ => bug!("spread argument isn't a tuple?!") }; - let lltemp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) - }); + let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { let dst = bcx.struct_gep(lltemp, i); - let arg = &fcx.fn_ty.args[idx]; + let arg = &mircx.fn_ty.args[idx]; idx += 1; - if common::type_is_fat_ptr(tcx, tupled_arg_ty) { + if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) { // We pass fat pointers as two words, but inside the tuple // they are the two sub-fields of a single aggregate field. 
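// Illustrative sketch (not part of the patch): a fat pointer is literally two
// machine words -- the data pointer plus its metadata (a length for slices, a
// vtable pointer for trait objects) -- which is why the argument is split into
// a data word and a `meta` word below.
fn main() {
    use std::mem::size_of;
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // data ptr + length
    assert_eq!(size_of::<&u8>(), size_of::<usize>());       // thin pointer: one word

    let s: &[u8] = b"abc";
    let (data, len) = (s.as_ptr(), s.len()); // the two words, made explicit
    assert_eq!(len, 3);
    assert!(!data.is_null());
}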
- let meta = &fcx.fn_ty.args[idx]; + let meta = &mircx.fn_ty.args[idx]; idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, - base::get_dataptr_builder(bcx, dst)); - meta.store_fn_arg(bcx, &mut llarg_idx, - base::get_meta_builder(bcx, dst)); + arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst)); + meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst)); } else { arg.store_fn_arg(bcx, &mut llarg_idx, dst); } @@ -376,20 +399,25 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // Now that we have one alloca that contains the aggregate value, // we can create one debuginfo entry for the argument. - bcx.with_block(|bcx| arg_scope.map(|scope| { + arg_scope.map(|scope| { let variable_access = VariableAccess::DirectVariable { alloca: lltemp }; - declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), - arg_ty, scope, variable_access, - VariableKind::ArgumentVariable(arg_index + 1), - bcx.fcx().span.unwrap_or(DUMMY_SP)); - })); + declare_local( + bcx, + &mircx.debug_context, + arg_decl.name.unwrap_or(keywords::Invalid.name()), + arg_ty, scope, + variable_access, + VariableKind::ArgumentVariable(arg_index + 1), + DUMMY_SP + ); + }); return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty))); } - let arg = &fcx.fn_ty.args[idx]; + let arg = &mircx.fn_ty.args[idx]; idx += 1; let llval = if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo { // Don't copy an indirect argument to an alloca, the caller @@ -406,7 +434,7 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, !arg.is_indirect() && arg.cast.is_none() && arg_scope.is_none() { if arg.is_ignore() { - return LocalRef::new_operand(bcx.ccx(), arg_ty); + return LocalRef::new_operand(bcx.ccx, arg_ty); } // We don't have to cast or keep the argument in the alloca. @@ -417,8 +445,8 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, } let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint); llarg_idx += 1; - let val = if common::type_is_fat_ptr(tcx, arg_ty) { - let meta = &fcx.fn_ty.args[idx]; + let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { + let meta = &mircx.fn_ty.args[idx]; idx += 1; assert_eq!((meta.cast, meta.pad), (None, None)); let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint); @@ -433,19 +461,15 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, }; return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); } else { - let lltemp = bcx.with_block(|bcx| { - base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)) - }); - if common::type_is_fat_ptr(tcx, arg_ty) { + let lltemp = base::alloc_ty(&bcx, arg_ty, &format!("arg{}", arg_index)); + if common::type_is_fat_ptr(bcx.ccx, arg_ty) { // we pass fat pointers as two words, but we want to // represent them internally as a pointer to two words, // so make an alloca to store them in. 
- let meta = &fcx.fn_ty.args[idx]; + let meta = &mircx.fn_ty.args[idx]; idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, - base::get_dataptr_builder(bcx, lltemp)); - meta.store_fn_arg(bcx, &mut llarg_idx, - base::get_meta_builder(bcx, lltemp)); + arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp)); + meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp)); } else { // otherwise, arg is passed by value, so make a // temporary and store it there @@ -453,13 +477,19 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, } lltemp }; - bcx.with_block(|bcx| arg_scope.map(|scope| { + arg_scope.map(|scope| { // Is this a regular argument? if arg_index > 0 || mir.upvar_decls.is_empty() { - declare_local(bcx, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, - scope, VariableAccess::DirectVariable { alloca: llval }, - VariableKind::ArgumentVariable(arg_index + 1), - bcx.fcx().span.unwrap_or(DUMMY_SP)); + declare_local( + bcx, + &mircx.debug_context, + arg_decl.name.unwrap_or(keywords::Invalid.name()), + arg_ty, + scope, + VariableAccess::DirectVariable { alloca: llval }, + VariableKind::ArgumentVariable(arg_index + 1), + DUMMY_SP + ); return; } @@ -483,17 +513,14 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. let env_ptr = if !env_ref { - use base::*; - use build::*; - use common::*; - let alloc = alloca(bcx, val_ty(llval), "__debuginfo_env_ptr"); - Store(bcx, llval, alloc); + let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr"); + bcx.store(llval, alloc); alloc } else { llval }; - let layout = bcx.ccx().layout_of(closure_ty); + let layout = bcx.ccx.layout_of(closure_ty); let offsets = match *layout { layout::Univariant { ref variant, .. } => &variant.offsets[..], _ => bug!("Closures are only supposed to be Univariant") @@ -502,7 +529,6 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { let byte_offset_of_var_in_env = offsets[i].bytes(); - let ops = unsafe { [llvm::LLVMRustDIBuilderCreateOpDeref(), llvm::LLVMRustDIBuilderCreateOpPlus(), @@ -527,11 +553,18 @@ fn arg_local_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, alloca: env_ptr, address_operations: &ops }; - declare_local(bcx, decl.debug_name, ty, scope, variable_access, - VariableKind::CapturedVariable, - bcx.fcx().span.unwrap_or(DUMMY_SP)); + declare_local( + bcx, + &mircx.debug_context, + decl.debug_name, + ty, + scope, + variable_access, + VariableKind::CapturedVariable, + DUMMY_SP + ); } - })); + }); LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))) }).collect() } diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 83e1d03c689abda4ebfd9b438fe4ee47a60f2fa9..a15d51d9da64dcc2850bb2d03bcd87950f06df0a 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -14,7 +14,7 @@ use rustc_data_structures::indexed_vec::Idx; use base; -use common::{self, Block, BlockAndBuilder}; +use common::{self, BlockAndBuilder}; use value::Value; use type_of; use type_::Type; @@ -73,7 +73,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { } } -impl<'bcx, 'tcx> OperandRef<'tcx> { +impl<'a, 'tcx> OperandRef<'tcx> { /// Asserts that this operand refers to a scalar and returns /// a reference to its value. 
pub fn immediate(self) -> ValueRef { @@ -85,18 +85,18 @@ pub fn immediate(self) -> ValueRef { /// If this operand is a Pair, we return an /// Immediate aggregate with the two values. - pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) + pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Pair(a, b) = self.val { // Reconstruct the immediate aggregate. - let llty = type_of::type_of(bcx.ccx(), self.ty); + let llty = type_of::type_of(bcx.ccx, self.ty); let mut llpair = common::C_undef(llty); let elems = [a, b]; for i in 0..2 { let mut elem = elems[i]; // Extend boolean i1's to i8. - if common::val_ty(elem) == Type::i1(bcx.ccx()) { - elem = bcx.zext(elem, Type::i8(bcx.ccx())); + if common::val_ty(elem) == Type::i1(bcx.ccx) { + elem = bcx.zext(elem, Type::i8(bcx.ccx)); } llpair = bcx.insert_value(llpair, elem, i); } @@ -107,23 +107,23 @@ pub fn pack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) /// If this operand is a pair in an Immediate, /// we return a Pair with the two halves. - pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) + pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'a, 'tcx>) -> OperandRef<'tcx> { if let OperandValue::Immediate(llval) = self.val { // Deconstruct the immediate aggregate. - if common::type_is_imm_pair(bcx.ccx(), self.ty) { + if common::type_is_imm_pair(bcx.ccx, self.ty) { debug!("Operand::unpack_if_pair: unpacking {:?}", self); let mut a = bcx.extract_value(llval, 0); let mut b = bcx.extract_value(llval, 1); - let pair_fields = common::type_pair_fields(bcx.ccx(), self.ty); + let pair_fields = common::type_pair_fields(bcx.ccx, self.ty); if let Some([a_ty, b_ty]) = pair_fields { if a_ty.is_bool() { - a = bcx.trunc(a, Type::i1(bcx.ccx())); + a = bcx.trunc(a, Type::i1(bcx.ccx)); } if b_ty.is_bool() { - b = bcx.trunc(b, Type::i1(bcx.ccx())); + b = bcx.trunc(b, Type::i1(bcx.ccx)); } } @@ -134,29 +134,29 @@ pub fn unpack_if_pair(mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) } } -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_load(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, llval: ValueRef, ty: Ty<'tcx>) -> OperandRef<'tcx> { debug!("trans_load: {:?} @ {:?}", Value(llval), ty); - let val = if common::type_is_fat_ptr(bcx.tcx(), ty) { - let (lldata, llextra) = base::load_fat_ptr_builder(bcx, llval, ty); + let val = if common::type_is_fat_ptr(bcx.ccx, ty) { + let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty); OperandValue::Pair(lldata, llextra) - } else if common::type_is_imm_pair(bcx.ccx(), ty) { - let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx(), ty).unwrap(); + } else if common::type_is_imm_pair(bcx.ccx, ty) { + let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap(); let a_ptr = bcx.struct_gep(llval, 0); let b_ptr = bcx.struct_gep(llval, 1); OperandValue::Pair( - base::load_ty_builder(bcx, a_ptr, a_ty), - base::load_ty_builder(bcx, b_ptr, b_ty) + base::load_ty(bcx, a_ptr, a_ty), + base::load_ty(bcx, b_ptr, b_ty) ) - } else if common::type_is_immediate(bcx.ccx(), ty) { - OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty)) + } else if common::type_is_immediate(bcx.ccx, ty) { + OperandValue::Immediate(base::load_ty(bcx, llval, ty)) } else { OperandValue::Ref(llval) }; @@ -165,7 +165,7 @@ pub fn trans_load(&mut self, } pub fn trans_consume(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>) -> 
OperandRef<'tcx> { @@ -197,7 +197,7 @@ pub fn trans_consume(&mut self, let llval = [a, b][f.index()]; let op = OperandRef { val: OperandValue::Immediate(llval), - ty: bcx.monomorphize(&ty) + ty: self.monomorphize(&ty) }; // Handle nested pairs. @@ -217,7 +217,7 @@ pub fn trans_consume(&mut self, } pub fn trans_operand(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, operand: &mir::Operand<'tcx>) -> OperandRef<'tcx> { @@ -230,7 +230,7 @@ pub fn trans_operand(&mut self, mir::Operand::Constant(ref constant) => { let val = self.trans_constant(bcx, constant); - let operand = val.to_operand(bcx.ccx()); + let operand = val.to_operand(bcx.ccx); if let OperandValue::Ref(ptr) = operand.val { // If this is a OperandValue::Ref to an immediate constant, load it. self.trans_load(bcx, ptr, operand.ty) @@ -242,33 +242,23 @@ pub fn trans_operand(&mut self, } pub fn store_operand(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, lldest: ValueRef, - operand: OperandRef<'tcx>) - { - debug!("store_operand: operand={:?} lldest={:?}", operand, lldest); - bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand)) - } - - pub fn store_operand_direct(&mut self, - bcx: Block<'bcx, 'tcx>, - lldest: ValueRef, - operand: OperandRef<'tcx>) - { + operand: OperandRef<'tcx>) { + debug!("store_operand: operand={:?}", operand); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. - if common::type_is_zero_size(bcx.ccx(), operand.ty) { + if common::type_is_zero_size(bcx.ccx, operand.ty) { return; } match operand.val { OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty), OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty), OperandValue::Pair(a, b) => { - use build::*; let a = base::from_immediate(bcx, a); let b = base::from_immediate(bcx, b); - Store(bcx, a, StructGEP(bcx, lldest, 0)); - Store(bcx, b, StructGEP(bcx, lldest, 1)); + bcx.store(a, bcx.struct_gep(lldest, 0)); + bcx.store(b, bcx.struct_gep(lldest, 1)); } } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 2ee49db477864d02abeea9964b67e53ba10b30bd..b17550087edf77c837870561b2f5dba8c0d333e2 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -13,13 +13,13 @@ use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::Layout; use rustc::mir; +use middle::lang_items::ExchangeMallocFnLangItem; use asm; use base; use callee::Callee; -use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder, Result}; +use common::{self, val_ty, C_bool, C_null, C_uint, BlockAndBuilder}; use common::{C_integral}; -use debuginfo::DebugLoc; use adt; use machine; use type_::Type; @@ -33,13 +33,12 @@ use super::operand::{OperandRef, OperandValue}; use super::lvalue::{LvalueRef}; -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_rvalue(&mut self, - bcx: BlockAndBuilder<'bcx, 'tcx>, + bcx: BlockAndBuilder<'a, 'tcx>, dest: LvalueRef<'tcx>, - rvalue: &mir::Rvalue<'tcx>, - debug_loc: DebugLoc) - -> BlockAndBuilder<'bcx, 'tcx> + rvalue: &mir::Rvalue<'tcx>) + -> BlockAndBuilder<'a, 'tcx> { debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})", Value(dest.llval), rvalue); @@ -54,12 +53,12 @@ pub fn trans_rvalue(&mut self, } mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { - let cast_ty = bcx.monomorphize(&cast_ty); + let cast_ty = 
self.monomorphize(&cast_ty); - if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { + if common::type_is_fat_ptr(bcx.ccx, cast_ty) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. - let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc); + let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); self.store_operand(&bcx, dest.llval, temp); return bcx; } @@ -70,71 +69,57 @@ pub fn trans_rvalue(&mut self, // so the (generic) MIR may not be able to expand it. let operand = self.trans_operand(&bcx, source); let operand = operand.pack_if_pair(&bcx); - bcx.with_block(|bcx| { - match operand.val { - OperandValue::Pair(..) => bug!(), - OperandValue::Immediate(llval) => { - // unsize from an immediate structure. We don't - // really need a temporary alloca here, but - // avoiding it would require us to have - // `coerce_unsized_into` use extractvalue to - // index into the struct, and this case isn't - // important enough for it. - debug!("trans_rvalue: creating ugly alloca"); - let lltemp = base::alloc_ty(bcx, operand.ty, "__unsize_temp"); - base::store_ty(bcx, llval, lltemp, operand.ty); - base::coerce_unsized_into(bcx, - lltemp, operand.ty, - dest.llval, cast_ty); - } - OperandValue::Ref(llref) => { - base::coerce_unsized_into(bcx, - llref, operand.ty, - dest.llval, cast_ty); - } + let llref = match operand.val { + OperandValue::Pair(..) => bug!(), + OperandValue::Immediate(llval) => { + // unsize from an immediate structure. We don't + // really need a temporary alloca here, but + // avoiding it would require us to have + // `coerce_unsized_into` use extractvalue to + // index into the struct, and this case isn't + // important enough for it. + debug!("trans_rvalue: creating ugly alloca"); + let lltemp = base::alloc_ty(&bcx, operand.ty, "__unsize_temp"); + base::store_ty(&bcx, llval, lltemp, operand.ty); + lltemp } - }); + OperandValue::Ref(llref) => llref + }; + base::coerce_unsized_into(&bcx, llref, operand.ty, dest.llval, cast_ty); bcx } mir::Rvalue::Repeat(ref elem, ref count) => { let tr_elem = self.trans_operand(&bcx, elem); let size = count.value.as_u64(bcx.tcx().sess.target.uint_type); - let size = C_uint(bcx.ccx(), size); - let base = base::get_dataptr_builder(&bcx, dest.llval); - let bcx = bcx.map_block(|block| { - tvec::slice_for_each(block, base, tr_elem.ty, size, |block, llslot| { - self.store_operand_direct(block, llslot, tr_elem); - block - }) - }); - bcx + let size = C_uint(bcx.ccx, size); + let base = base::get_dataptr(&bcx, dest.llval); + tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| { + self.store_operand(bcx, llslot, tr_elem); + }) } mir::Rvalue::Aggregate(ref kind, ref operands) => { match *kind { mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { let disr = Disr::from(adt_def.variants[variant_index].disr_val); - bcx.with_block(|bcx| { - adt::trans_set_discr(bcx, - dest.ty.to_ty(bcx.tcx()), dest.llval, Disr::from(disr)); - }); + let dest_ty = dest.ty.to_ty(bcx.tcx()); + adt::trans_set_discr(&bcx, dest_ty, dest.llval, Disr::from(disr)); for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. 
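// Illustrative sketch (not part of the patch): values of zero-sized types take
// up no storage at all, so the aggregate-building loop below skips them rather
// than emitting a GEP and a store for each one.
fn main() {
    use std::mem::size_of;
    struct Marker;                          // a zero-sized type
    struct Tagged { tag: u32, _m: Marker }  // the ZST field adds no bytes

    assert_eq!(size_of::<Marker>(), 0);
    assert_eq!(size_of::<Tagged>(), size_of::<u32>());

    let t = Tagged { tag: 7, _m: Marker };
    assert_eq!(t.tag, 7);
}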
- if !common::type_is_zero_size(bcx.ccx(), op.ty) { + if !common::type_is_zero_size(bcx.ccx, op.ty) { let val = adt::MaybeSizedValue::sized(dest.llval); let field_index = active_field_index.unwrap_or(i); - let lldest_i = adt::trans_field_ptr_builder(&bcx, - dest.ty.to_ty(bcx.tcx()), - val, disr, field_index); + let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr, + field_index); self.store_operand(&bcx, lldest_i, op); } } }, _ => { // If this is a tuple or closure, we need to translate GEP indices. - let layout = bcx.ccx().layout_of(dest.ty.to_ty(bcx.tcx())); + let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); let translation = if let Layout::Univariant { ref variant, .. } = *layout { Some(&variant.memory_index) } else { @@ -143,7 +128,7 @@ pub fn trans_rvalue(&mut self, for (i, operand) in operands.iter().enumerate() { let op = self.trans_operand(&bcx, operand); // Do not generate stores and GEPis for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx(), op.ty) { + if !common::type_is_zero_size(bcx.ccx, op.ty) { // Note: perhaps this should be StructGep, but // note that in some cases the values here will // not be structs but arrays. @@ -171,16 +156,13 @@ pub fn trans_rvalue(&mut self, self.trans_operand(&bcx, input).immediate() }).collect(); - bcx.with_block(|bcx| { - asm::trans_inline_asm(bcx, asm, outputs, input_vals); - }); - + asm::trans_inline_asm(&bcx, asm, outputs, input_vals); bcx } _ => { - assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue)); - let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue, debug_loc); + assert!(rvalue_creates_operand(rvalue)); + let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); self.store_operand(&bcx, dest.llval, temp); bcx } @@ -188,27 +170,25 @@ pub fn trans_rvalue(&mut self, } pub fn trans_rvalue_operand(&mut self, - bcx: BlockAndBuilder<'bcx, 'tcx>, - rvalue: &mir::Rvalue<'tcx>, - debug_loc: DebugLoc) - -> (BlockAndBuilder<'bcx, 'tcx>, OperandRef<'tcx>) + bcx: BlockAndBuilder<'a, 'tcx>, + rvalue: &mir::Rvalue<'tcx>) + -> (BlockAndBuilder<'a, 'tcx>, OperandRef<'tcx>) { - assert!(rvalue_creates_operand(&self.mir, &bcx, rvalue), - "cannot trans {:?} to operand", rvalue); + assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { let operand = self.trans_operand(&bcx, source); debug!("cast operand is {:?}", operand); - let cast_ty = bcx.monomorphize(&cast_ty); + let cast_ty = self.monomorphize(&cast_ty); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.ty.sty { ty::TyFnDef(def_id, substs, _) => { OperandValue::Immediate( - Callee::def(bcx.ccx(), def_id, substs) - .reify(bcx.ccx())) + Callee::def(bcx.ccx, def_id, substs) + .reify(bcx.ccx)) } _ => { bug!("{} cannot be reified to a fn ptr", operand.ty) @@ -222,7 +202,7 @@ pub fn trans_rvalue_operand(&mut self, mir::CastKind::Unsize => { // unsize targets other than to a fat pointer currently // can't be operands. - assert!(common::type_is_fat_ptr(bcx.tcx(), cast_ty)); + assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty)); match operand.val { OperandValue::Pair(lldata, llextra) => { @@ -232,16 +212,14 @@ pub fn trans_rvalue_operand(&mut self, // &'a fmt::Debug+Send => &'a fmt::Debug, // So we need to pointercast the base to ensure // the types match up. 
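// Illustrative sketch (not part of the patch): an unsize cast that merely drops
// an auto trait (the `&Debug + Send => &Debug` case mentioned above) keeps the
// same data pointer and the same vtable; only the static type of the fat
// pointer changes, which is why a plain pointercast of the data word suffices.
// (Written with modern `dyn` syntax, which this patch predates.)
use std::fmt::Debug;

fn main() {
    let x = 5u32;
    let both: &(dyn Debug + Send) = &x; // fat pointer: data word + vtable word
    let plain: &dyn Debug = both;       // drop `Send`: same two words, new type
    assert_eq!(std::mem::size_of_val(&plain), 2 * std::mem::size_of::<usize>());
    println!("{:?}", plain);            // still dispatches through the same vtable
}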
- let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty); + let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty); let lldata = bcx.pointercast(lldata, llcast_ty); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { // "standard" unsize - let (lldata, llextra) = bcx.with_block(|bcx| { - base::unsize_thin_ptr(bcx, lldata, - operand.ty, cast_ty) - }); + let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata, + operand.ty, cast_ty); OperandValue::Pair(lldata, llextra) } OperandValue::Ref(_) => { @@ -250,11 +228,11 @@ pub fn trans_rvalue_operand(&mut self, } } } - mir::CastKind::Misc if common::type_is_fat_ptr(bcx.tcx(), operand.ty) => { - let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty); - let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty); + mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => { + let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty); + let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty); if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val { - if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { + if common::type_is_fat_ptr(bcx.ccx, cast_ty) { let ll_cft = ll_cast_ty.field_types(); let ll_fft = ll_from_ty.field_types(); let data_cast = bcx.pointercast(data_ptr, ll_cft[0]); @@ -271,19 +249,17 @@ pub fn trans_rvalue_operand(&mut self, } } mir::CastKind::Misc => { - debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty)); + debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty)); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty); - let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty); + let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty); + let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty); let (llval, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let l = bcx.ccx().layout_of(operand.ty); + let l = bcx.ccx.layout_of(operand.ty); let discr = match operand.val { OperandValue::Immediate(llval) => llval, OperandValue::Ref(llptr) => { - bcx.with_block(|bcx| { - adt::trans_get_discr(bcx, operand.ty, llptr, None, true) - }) + adt::trans_get_discr(&bcx, operand.ty, llptr, None, true) } OperandValue::Pair(..) => bug!("Unexpected Pair operand") }; @@ -376,7 +352,7 @@ pub fn trans_rvalue_operand(&mut self, // Note: lvalues are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let operand = if common::type_is_sized(bcx.tcx(), ty) { + let operand = if bcx.ccx.shared().type_is_sized(ty) { OperandRef { val: OperandValue::Immediate(tr_lvalue.llval), ty: ref_ty, @@ -394,7 +370,7 @@ pub fn trans_rvalue_operand(&mut self, mir::Rvalue::Len(ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); let operand = OperandRef { - val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx())), + val: OperandValue::Immediate(tr_lvalue.len(bcx.ccx)), ty: bcx.tcx().types.usize, }; (bcx, operand) @@ -403,7 +379,7 @@ pub fn trans_rvalue_operand(&mut self, mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); - let llresult = if common::type_is_fat_ptr(bcx.tcx(), lhs.ty) { + let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) { match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { @@ -461,26 +437,27 @@ pub fn trans_rvalue_operand(&mut self, } mir::Rvalue::Box(content_ty) => { - let content_ty: Ty<'tcx> = bcx.monomorphize(&content_ty); - let llty = type_of::type_of(bcx.ccx(), content_ty); - let llsize = machine::llsize_of(bcx.ccx(), llty); - let align = type_of::align_of(bcx.ccx(), content_ty); - let llalign = C_uint(bcx.ccx(), align); + let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); + let llty = type_of::type_of(bcx.ccx, content_ty); + let llsize = machine::llsize_of(bcx.ccx, llty); + let align = type_of::align_of(bcx.ccx, content_ty); + let llalign = C_uint(bcx.ccx, align); let llty_ptr = llty.ptr_to(); let box_ty = bcx.tcx().mk_box(content_ty); - let mut llval = None; - let bcx = bcx.map_block(|bcx| { - let Result { bcx, val } = base::malloc_raw_dyn(bcx, - llty_ptr, - box_ty, - llsize, - llalign, - debug_loc); - llval = Some(val); - bcx - }); + + // Allocate space: + let def_id = match bcx.tcx().lang_items.require(ExchangeMallocFnLangItem) { + Ok(id) => id, + Err(s) => { + bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); + } + }; + let r = Callee::def(bcx.ccx, def_id, bcx.tcx().intern_substs(&[])) + .reify(bcx.ccx); + let val = bcx.pointercast(bcx.call(r, &[llsize, llalign], None), llty_ptr); + let operand = OperandRef { - val: OperandValue::Immediate(llval.unwrap()), + val: OperandValue::Immediate(val), ty: box_ty, }; (bcx, operand) @@ -500,7 +477,7 @@ pub fn trans_rvalue_operand(&mut self, } pub fn trans_scalar_binop(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -542,26 +519,11 @@ pub fn trans_scalar_binop(&mut self, mir::BinOp::BitOr => bcx.or(lhs, rhs), mir::BinOp::BitAnd => bcx.and(lhs, rhs), mir::BinOp::BitXor => bcx.xor(lhs, rhs), - mir::BinOp::Shl => { - bcx.with_block(|bcx| { - common::build_unchecked_lshift(bcx, - lhs, - rhs, - DebugLoc::None) - }) - } - mir::BinOp::Shr => { - bcx.with_block(|bcx| { - common::build_unchecked_rshift(bcx, - input_ty, - lhs, - rhs, - DebugLoc::None) - }) - } + mir::BinOp::Shl => common::build_unchecked_lshift(bcx, lhs, rhs), + mir::BinOp::Shr => common::build_unchecked_rshift(bcx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_nil { - C_bool(bcx.ccx(), match op { + C_bool(bcx.ccx, match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -575,8 +537,8 @@ pub fn trans_scalar_binop(&mut self, let (lhs, rhs) = if 
is_bool { // FIXME(#36856) -- extend the bools into `i8` because // LLVM's i1 comparisons are broken. - (bcx.zext(lhs, Type::i8(bcx.ccx())), - bcx.zext(rhs, Type::i8(bcx.ccx()))) + (bcx.zext(lhs, Type::i8(bcx.ccx)), + bcx.zext(rhs, Type::i8(bcx.ccx))) } else { (lhs, rhs) }; @@ -590,7 +552,7 @@ pub fn trans_scalar_binop(&mut self, } pub fn trans_fat_ptr_binop(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, op: mir::BinOp, lhs_addr: ValueRef, lhs_extra: ValueRef, @@ -637,7 +599,7 @@ pub fn trans_fat_ptr_binop(&mut self, } pub fn trans_scalar_checked_binop(&mut self, - bcx: &BlockAndBuilder<'bcx, 'tcx>, + bcx: &BlockAndBuilder<'a, 'tcx>, op: mir::BinOp, lhs: ValueRef, rhs: ValueRef, @@ -646,9 +608,9 @@ pub fn trans_scalar_checked_binop(&mut self, // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bcx.ccx().check_overflow() { + if !bcx.ccx.check_overflow() { let val = self.trans_scalar_binop(bcx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, C_bool(bcx.ccx(), false)); + return OperandValue::Pair(val, C_bool(bcx.ccx, false)); } // First try performing the operation on constants, which @@ -656,7 +618,7 @@ pub fn trans_scalar_checked_binop(&mut self, // This is necessary to determine when an overflow Assert // will always panic at runtime, and produce a warning. if let Some((val, of)) = const_scalar_checked_binop(bcx.tcx(), op, lhs, rhs, input_ty) { - return OperandValue::Pair(val, C_bool(bcx.ccx(), of)); + return OperandValue::Pair(val, C_bool(bcx.ccx, of)); } let (val, of) = match op { @@ -677,9 +639,7 @@ pub fn trans_scalar_checked_binop(&mut self, mir::BinOp::Shl | mir::BinOp::Shr => { let lhs_llty = val_ty(lhs); let rhs_llty = val_ty(rhs); - let invert_mask = bcx.with_block(|bcx| { - common::shift_mask_val(bcx, lhs_llty, rhs_llty, true) - }); + let invert_mask = common::shift_mask_val(&bcx, lhs_llty, rhs_llty, true); let outer_bits = bcx.and(rhs, invert_mask); let of = bcx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); @@ -696,9 +656,7 @@ pub fn trans_scalar_checked_binop(&mut self, } } -pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>, - _bcx: &BlockAndBuilder<'bcx, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) -> bool { +pub fn rvalue_creates_operand(rvalue: &mir::Rvalue) -> bool { match *rvalue { mir::Rvalue::Ref(..) | mir::Rvalue::Len(..) 
| @@ -789,5 +747,5 @@ fn get_overflow_intrinsic(oop: OverflowOp, bcx: &BlockAndBuilder, ty: Ty) -> Val }, }; - bcx.ccx().get_intrinsic(&name) + bcx.ccx.get_intrinsic(&name) } diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 296a0e8049e080377904cddbcc23e514861af116..cc85f68c197ec34551df74a1742b7d171ddf5bc5 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -18,57 +18,52 @@ use super::super::adt; use super::super::disr::Disr; -impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { +impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, - bcx: BlockAndBuilder<'bcx, 'tcx>, + bcx: BlockAndBuilder<'a, 'tcx>, statement: &mir::Statement<'tcx>) - -> BlockAndBuilder<'bcx, 'tcx> { + -> BlockAndBuilder<'a, 'tcx> { debug!("trans_statement(statement={:?})", statement); - let debug_loc = self.debug_loc(statement.source_info); - debug_loc.apply_to_bcx(&bcx); - debug_loc.apply(bcx.fcx()); + self.set_debug_loc(&bcx, statement.source_info); match statement.kind { mir::StatementKind::Assign(ref lvalue, ref rvalue) => { if let mir::Lvalue::Local(index) = *lvalue { match self.locals[index] { LocalRef::Lvalue(tr_dest) => { - self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc) + self.trans_rvalue(bcx, tr_dest, rvalue) } LocalRef::Operand(None) => { - let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue, - debug_loc); + let (bcx, operand) = self.trans_rvalue_operand(bcx, rvalue); self.locals[index] = LocalRef::Operand(Some(operand)); bcx } LocalRef::Operand(Some(_)) => { let ty = self.monomorphized_lvalue_ty(lvalue); - if !common::type_is_zero_size(bcx.ccx(), ty) { + if !common::type_is_zero_size(bcx.ccx, ty) { span_bug!(statement.source_info.span, "operand {:?} already assigned", rvalue); } else { // If the type is zero-sized, it's already been set here, // but we still need to make sure we translate the operand - self.trans_rvalue_operand(bcx, rvalue, debug_loc).0 + self.trans_rvalue_operand(bcx, rvalue).0 } } } } else { let tr_dest = self.trans_lvalue(&bcx, lvalue); - self.trans_rvalue(bcx, tr_dest, rvalue, debug_loc) + self.trans_rvalue(bcx, tr_dest, rvalue) } } mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { let ty = self.monomorphized_lvalue_ty(lvalue); let lvalue_transed = self.trans_lvalue(&bcx, lvalue); - bcx.with_block(|bcx| - adt::trans_set_discr(bcx, - ty, - lvalue_transed.llval, - Disr::from(variant_index)) - ); + adt::trans_set_discr(&bcx, + ty, + lvalue_transed.llval, + Disr::from(variant_index)); bcx } mir::StatementKind::StorageLive(ref lvalue) => { @@ -82,10 +77,10 @@ pub fn trans_statement(&mut self, } fn trans_storage_liveness(&self, - bcx: BlockAndBuilder<'bcx, 'tcx>, + bcx: BlockAndBuilder<'a, 'tcx>, lvalue: &mir::Lvalue<'tcx>, intrinsic: base::Lifetime) - -> BlockAndBuilder<'bcx, 'tcx> { + -> BlockAndBuilder<'a, 'tcx> { if let mir::Lvalue::Local(index) = *lvalue { if let LocalRef::Lvalue(tr_lval) = self.locals[index] { intrinsic.call(&bcx, tr_lval.llval); diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index 214eaeb817f302b24011f9603d5899aa1bf022cc..527bee832956a7c25d2e76d04a21abc8b1ff2aa0 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -184,7 +184,7 @@ fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>, linkage: llvm::Linkage, symbol_name: &str) { let tcx = ccx.tcx(); - assert_eq!(dg.ty(), glue::get_drop_glue_type(tcx, dg.ty())); + assert_eq!(dg.ty(), glue::get_drop_glue_type(ccx.shared(), dg.ty())); let 
t = dg.ty(); let sig = tcx.mk_fn_sig(iter::once(tcx.mk_mut_ptr(tcx.types.i8)), tcx.mk_nil(), false); diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs index cf897fc5a151877673768c4fd24d42cb2c652a11..c09726fda081028c6d48b71d8f9cdecdcf6ad624 100644 --- a/src/librustc_trans/tvec.rs +++ b/src/librustc_trans/tvec.rs @@ -8,56 +8,46 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![allow(non_camel_case_types)] - use llvm; use llvm::ValueRef; -use base::*; -use build::*; use common::*; -use debuginfo::DebugLoc; use rustc::ty::Ty; -pub fn slice_for_each<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, - data_ptr: ValueRef, - unit_ty: Ty<'tcx>, - len: ValueRef, - f: F) - -> Block<'blk, 'tcx> where - F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>, -{ - let _icx = push_ctxt("tvec::slice_for_each"); - let fcx = bcx.fcx; - +pub fn slice_for_each<'a, 'tcx, F>( + bcx: &BlockAndBuilder<'a, 'tcx>, + data_ptr: ValueRef, + unit_ty: Ty<'tcx>, + len: ValueRef, + f: F +) -> BlockAndBuilder<'a, 'tcx> where F: FnOnce(&BlockAndBuilder<'a, 'tcx>, ValueRef) { // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) - let zst = type_is_zero_size(bcx.ccx(), unit_ty); - let add = |bcx, a, b| if zst { - Add(bcx, a, b, DebugLoc::None) + let zst = type_is_zero_size(bcx.ccx, unit_ty); + let add = |bcx: &BlockAndBuilder, a, b| if zst { + bcx.add(a, b) } else { - InBoundsGEP(bcx, a, &[b]) + bcx.inbounds_gep(a, &[b]) }; - let header_bcx = fcx.new_block("slice_loop_header"); - let body_bcx = fcx.new_block("slice_loop_body"); - let next_bcx = fcx.new_block("slice_loop_next"); + let body_bcx = bcx.fcx().build_new_block("slice_loop_body"); + let next_bcx = bcx.fcx().build_new_block("slice_loop_next"); + let header_bcx = bcx.fcx().build_new_block("slice_loop_header"); let start = if zst { - C_uint(bcx.ccx(), 0 as usize) + C_uint(bcx.ccx, 0usize) } else { data_ptr }; - let end = add(bcx, start, len); + let end = add(&bcx, start, len); - Br(bcx, header_bcx.llbb, DebugLoc::None); - let current = Phi(header_bcx, val_ty(start), &[start], &[bcx.llbb]); + bcx.br(header_bcx.llbb()); + let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]); - let keep_going = - ICmp(header_bcx, llvm::IntNE, current, end, DebugLoc::None); - CondBr(header_bcx, keep_going, body_bcx.llbb, next_bcx.llbb, DebugLoc::None); + let keep_going = header_bcx.icmp(llvm::IntNE, current, end); + header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); - let body_bcx = f(body_bcx, if zst { data_ptr } else { current }); - let next = add(body_bcx, current, C_uint(bcx.ccx(), 1usize)); - AddIncomingToPhi(current, next, body_bcx.llbb); - Br(body_bcx, header_bcx.llbb, DebugLoc::None); + f(&body_bcx, if zst { data_ptr } else { current }); + let next = add(&body_bcx, current, C_uint(bcx.ccx, 1usize)); + header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); + body_bcx.br(header_bcx.llbb()); next_bcx } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 22c405fe254a62c400d818d54e51de617f67c405..469214b466e1ae7b9c3ffeffaa5857703b4c63b2 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -8,8 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
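// Illustrative sketch (not part of the patch): the rewritten tvec::slice_for_each
// above still produces the classic header/body/next loop -- compare the current
// pointer with `end`, run the body, advance, branch back. A rough Rust-level
// equivalent, including the special case that avoids pointer arithmetic for
// zero-sized element types (an assumed simplification; the real helper emits
// LLVM basic blocks and a phi instead):
unsafe fn slice_for_each<T>(data: *const T, len: usize, mut f: impl FnMut(*const T)) {
    if std::mem::size_of::<T>() == 0 {
        // For ZSTs, count iterations instead of advancing the data pointer.
        for _ in 0..len { f(data); }
    } else {
        let end = data.add(len);   // loop bound computed once, like `end` above
        let mut cur = data;
        while cur != end {         // slice_loop_header: keep_going = cur != end
            f(cur);                // slice_loop_body
            cur = cur.add(1);      // next = cur + 1, fed back into the phi
        }
    }
}

fn main() {
    let xs = [1u32, 2, 3];
    let mut sum = 0u32;
    let add = |p: *const u32| unsafe { sum += *p };
    unsafe { slice_for_each(xs.as_ptr(), xs.len(), add) };
    assert_eq!(sum, 6);
}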
-#![allow(non_camel_case_types)] - use abi::FnType; use adt; use common::*; @@ -41,7 +39,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ let _recursion_lock = cx.enter_type_of(t); let llsizingty = match t.sty { - _ if !type_is_sized(cx.tcx(), t) => { + _ if !cx.shared().type_is_sized(t) => { Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false) } @@ -55,7 +53,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) | ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - if type_is_sized(cx.tcx(), ty) { + if cx.shared().type_is_sized(ty) { Type::i8p(cx) } else { Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false) @@ -104,7 +102,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ // FIXME(eddyb) Temporary sanity check for ty::layout. let layout = cx.layout_of(t); - if !type_is_sized(cx.tcx(), t) { + if !cx.shared().type_is_sized(t) { if !layout.is_unsized() { bug!("layout should be unsized for type `{}` / {:#?}", t, layout); @@ -135,7 +133,7 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> match ty.sty { ty::TyBox(t) | ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | - ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !type_is_sized(ccx.tcx(), t) => { + ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !ccx.shared().type_is_sized(t) => { in_memory_type_of(ccx, t).ptr_to() } _ => bug!("expected fat ptr ty but got {:?}", ty) @@ -172,7 +170,7 @@ pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> /// is too large for it to be placed in SSA value (by our rules). /// For the raw type without far pointer indirection, see `in_memory_type_of`. pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - let ty = if !type_is_sized(cx.tcx(), ty) { + let ty = if !cx.shared().type_is_sized(ty) { cx.tcx().mk_imm_ptr(ty) } else { ty @@ -232,7 +230,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> ty::TyBox(ty) | ty::TyRef(_, ty::TypeAndMut{ty, ..}) | ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - if !type_is_sized(cx.tcx(), ty) { + if !cx.shared().type_is_sized(ty) { if let ty::TyStr = ty.sty { // This means we get a nicer name in the output (str is always // unsized). diff --git a/src/librustc_trans/value.rs b/src/librustc_trans/value.rs index 79e0c11515fc49f34fe6cb5ed716a2fd8d1378a6..287ad87caacf94e6398c9e6bc8e1ebd9abb063e7 100644 --- a/src/librustc_trans/value.rs +++ b/src/librustc_trans/value.rs @@ -9,16 +9,11 @@ // except according to those terms. use llvm; -use llvm::{UseRef, ValueRef}; -use basic_block::BasicBlock; -use common::Block; use std::fmt; -use libc::c_uint; - #[derive(Copy, Clone, PartialEq)] -pub struct Value(pub ValueRef); +pub struct Value(pub llvm::ValueRef); impl fmt::Debug for Value { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -27,152 +22,3 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { }).expect("nun-UTF8 value description from LLVM")) } } - -macro_rules! 
opt_val { ($e:expr) => ( - unsafe { - match $e { - p if !p.is_null() => Some(Value(p)), - _ => None - } - } -) } - -/// Wrapper for LLVM ValueRef -impl Value { - /// Returns the native ValueRef - pub fn get(&self) -> ValueRef { - let Value(v) = *self; v - } - - /// Returns the BasicBlock that contains this value - pub fn get_parent(self) -> Option { - unsafe { - match llvm::LLVMGetInstructionParent(self.get()) { - p if !p.is_null() => Some(BasicBlock(p)), - _ => None - } - } - } - - /// Removes this value from its containing BasicBlock - pub fn erase_from_parent(self) { - unsafe { - llvm::LLVMInstructionEraseFromParent(self.get()); - } - } - - /// Returns the single dominating store to this value, if any - /// This only performs a search for a trivially dominating store. The store - /// must be the only user of this value, and there must not be any conditional - /// branches between the store and the given block. - pub fn get_dominating_store(self, bcx: Block) -> Option { - match self.get_single_user().and_then(|user| user.as_store_inst()) { - Some(store) => { - store.get_parent().and_then(|store_bb| { - let mut bb = BasicBlock(bcx.llbb); - let mut ret = Some(store); - while bb.get() != store_bb.get() { - match bb.get_single_predecessor() { - Some(pred) => bb = pred, - None => { ret = None; break } - } - } - ret - }) - } - _ => None - } - } - - /// Returns the first use of this value, if any - pub fn get_first_use(self) -> Option { - unsafe { - match llvm::LLVMGetFirstUse(self.get()) { - u if !u.is_null() => Some(Use(u)), - _ => None - } - } - } - - /// Tests if there are no uses of this value - pub fn has_no_uses(self) -> bool { - self.get_first_use().is_none() - } - - /// Returns the single user of this value - /// If there are no users or multiple users, this returns None - pub fn get_single_user(self) -> Option { - let mut iter = self.user_iter(); - match (iter.next(), iter.next()) { - (Some(first), None) => Some(first), - _ => None - } - } - - /// Returns an iterator for the users of this value - pub fn user_iter(self) -> Users { - Users { - next: self.get_first_use() - } - } - - /// Returns the requested operand of this instruction - /// Returns None, if there's no operand at the given index - pub fn get_operand(self, i: usize) -> Option { - opt_val!(llvm::LLVMGetOperand(self.get(), i as c_uint)) - } - - /// Returns the Store represent by this value, if any - pub fn as_store_inst(self) -> Option { - opt_val!(llvm::LLVMIsAStoreInst(self.get())) - } - - /// Tests if this value is a terminator instruction - pub fn is_a_terminator_inst(self) -> bool { - unsafe { - !llvm::LLVMIsATerminatorInst(self.get()).is_null() - } - } -} - -/// Wrapper for LLVM UseRef -#[derive(Copy, Clone)] -pub struct Use(UseRef); - -impl Use { - pub fn get(&self) -> UseRef { - let Use(v) = *self; v - } - - pub fn get_user(self) -> Value { - unsafe { - Value(llvm::LLVMGetUser(self.get())) - } - } - - pub fn get_next_use(self) -> Option { - unsafe { - match llvm::LLVMGetNextUse(self.get()) { - u if !u.is_null() => Some(Use(u)), - _ => None - } - } - } -} - -/// Iterator for the users of a value -pub struct Users { - next: Option -} - -impl Iterator for Users { - type Item = Value; - - fn next(&mut self) -> Option { - let current = self.next; - - self.next = current.and_then(|u| u.get_next_use()); - - current.map(|u| u.get_user()) - } -} diff --git a/src/test/run-pass/trans-object-shim.rs b/src/test/run-pass/trans-object-shim.rs new file mode 100644 index 
0000000000000000000000000000000000000000..5fbfef05e10d4f321b14f8f891c31d58e15faa24 --- /dev/null +++ b/src/test/run-pass/trans-object-shim.rs @@ -0,0 +1,14 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +fn main() { + assert_eq!((ToString::to_string as fn(&(ToString+'static)) -> String)(&"foo"), + String::from("foo")); +}