Commit 1b38776c authored by bors

Auto merge of #38302 - Mark-Simulacrum:trans-cleanup, r=eddyb

Cleanup old trans

This is a cleanup of old trans, with the following main points:
 - Remove the `build.rs` API in favor of using `Builder` directly (now passed where needed through `BlockAndBuilder`); see the sketch below.
 - Remove `Block` (inlining it into `BlockAndBuilder`)
 - Remove `Callee::call`, primarily through inlining and simplification of code.
 - Thin `FunctionContext`:
   - `mir`, `debug_scopes`, `scopes`, and `fn_ty` are moved to `MirContext`.
   - `param_env` is moved to `SharedCrateContext` and renamed to `empty_param_env`.
   - `llretslotptr` is removed, replaced with more careful management of the return values in calls.
   - `landingpad_alloca` is inlined into cleanup.
   - `param_substs` are moved to `MirContext`.
   - `span` is removed; it was never set to anything but `None`.
   - `block_arena` and `lpad_arena` are removed, since neither was necessary (landing pads and blocks are quite small, and neither needs arena allocation).
 - Fix `drop_in_place` not running other destructors in the same function.

Fixes #35566 (thanks to @est31 for confirming).
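
The heart of the cleanup is a call-style migration that recurs throughout the diff below: free functions from `build.rs` such as `Store(bcx, val, ptr)` and `StructGEP(bcx, val, 0)` become builder methods (`bcx.store(val, ptr)`, `bcx.struct_gep(val, 0)`), and the `bcx.ccx()` accessor becomes a plain `bcx.ccx` field. A minimal stand-in sketch of the before/after styles (the types here are illustrative, not rustc's actual definitions):

```rust
// Stand-in builder; rustc's real `Builder` wraps an LLVM IRBuilder.
struct Builder {
    insns: Vec<String>,
}

impl Builder {
    // New style: emit through a method on the builder.
    fn store(&mut self, val: u64, ptr: &str) {
        self.insns.push(format!("store {} -> {}", val, ptr));
    }
}

// Old style, removed by this PR: a free function taking the block first.
#[allow(non_snake_case)]
fn Store(bcx: &mut Builder, val: u64, ptr: &str) {
    bcx.store(val, ptr)
}

fn main() {
    let mut bcx = Builder { insns: vec![] };
    Store(&mut bcx, 1, "%slot"); // before
    bcx.store(2, "%slot");       // after
    assert_eq!(bcx.insns.len(), 2);
}
```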
......@@ -127,6 +127,7 @@ pub fn usable_size(size: usize, align: usize) -> usize {
pub const EMPTY: *mut () = 0x1 as *mut ();
/// The allocator for unique pointers.
// This function must not unwind. If it does, MIR trans will fail.
#[cfg(not(test))]
#[lang = "exchange_malloc"]
#[inline]
......
......@@ -710,6 +710,7 @@ pub fn LLVMAppendBasicBlockInContext(C: ContextRef,
// Operations on instructions
pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef;
pub fn LLVMGetFirstBasicBlock(Fn: ValueRef) -> BasicBlockRef;
pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef;
pub fn LLVMInstructionEraseFromParent(Inst: ValueRef);
......
......@@ -10,7 +10,6 @@
use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
use base;
use build::AllocaFcx;
use common::{type_is_fat_ptr, BlockAndBuilder, C_uint};
use context::CrateContext;
use cabi_x86;
......@@ -99,21 +98,11 @@ pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
self
}
pub fn unset(&mut self, attr: ArgAttribute) -> &mut Self {
self.regular = self.regular - attr;
self
}
pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
self.dereferenceable_bytes = bytes;
self
}
pub fn unset_dereferenceable(&mut self) -> &mut Self {
self.dereferenceable_bytes = 0;
self
}
pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
unsafe {
self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
......@@ -246,7 +235,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
if self.is_ignore() {
return;
}
let ccx = bcx.ccx();
let ccx = bcx.ccx;
if self.is_indirect() {
let llsz = llsize_of(ccx, self.ty);
let llalign = llalign_of_min(ccx, self.ty);
......@@ -278,7 +267,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
let llscratch = AllocaFcx(bcx.fcx(), ty, "abi_cast");
let llscratch = bcx.fcx().alloca(ty, "abi_cast");
base::Lifetime::Start.call(bcx, llscratch);
// ...where we first store the value...
......@@ -431,7 +420,7 @@ pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let ret_ty = sig.output();
let mut ret = arg_of(ret_ty, true);
if !type_is_fat_ptr(ccx.tcx(), ret_ty) {
if !type_is_fat_ptr(ccx, ret_ty) {
// The `noalias` attribute on the return value is useful to a
// function ptr caller.
if let ty::TyBox(_) = ret_ty.sty {
......@@ -496,7 +485,7 @@ pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
for ty in inputs.iter().chain(extra_args.iter()) {
let mut arg = arg_of(ty, false);
if type_is_fat_ptr(ccx.tcx(), ty) {
if type_is_fat_ptr(ccx, ty) {
let original_tys = arg.original_ty.field_types();
let sizing_tys = arg.ty.field_types();
assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2));
......@@ -569,7 +558,7 @@ pub fn adjust_for_abi<'a, 'tcx>(&mut self,
};
// Fat pointers are returned by-value.
if !self.ret.is_ignore() {
if !type_is_fat_ptr(ccx.tcx(), sig.output()) {
if !type_is_fat_ptr(ccx, sig.output()) {
fixup(&mut self.ret);
}
}
......
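The `set` and `set_dereferenceable` methods above return `&mut Self` so that attribute updates can chain; the removed `unset` and `unset_dereferenceable` variants were unused. A self-contained sketch of the same chaining pattern (stand-in types, not rustc's `ArgAttributes`):

```rust
#[derive(Default)]
struct ArgAttrs {
    regular: u32, // stand-in for the attribute bitset
    dereferenceable_bytes: u64,
}

impl ArgAttrs {
    fn set(&mut self, attr: u32) -> &mut Self {
        self.regular |= attr;
        self
    }
    fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
        self.dereferenceable_bytes = bytes;
        self
    }
}

fn main() {
    const NO_ALIAS: u32 = 1 << 0;
    let mut attrs = ArgAttrs::default();
    // Returning `&mut Self` lets call sites chain updates:
    attrs.set(NO_ALIAS).set_dereferenceable(8);
    assert_eq!(attrs.dereferenceable_bytes, 8);
}
```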
......@@ -48,9 +48,7 @@
use llvm::{ValueRef, True, IntEQ, IntNE};
use rustc::ty::layout;
use rustc::ty::{self, Ty, AdtKind};
use build::*;
use common::*;
use debuginfo::DebugLoc;
use glue;
use base;
use machine;
......@@ -295,7 +293,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
sizing: bool, dst: bool) -> Vec<Type> {
let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
if sizing {
fields.filter(|ty| !dst || type_is_sized(cx.tcx(), *ty))
fields.filter(|ty| !dst || cx.shared().type_is_sized(*ty))
.map(|ty| type_of::sizing_type_of(cx, ty)).collect()
} else {
fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
......@@ -304,12 +302,13 @@ fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>
/// Obtain a representation of the discriminant sufficient to translate
/// destructuring; this may or may not involve the actual discriminant.
pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: Ty<'tcx>,
scrutinee: ValueRef,
range_assert: bool)
-> (BranchKind, Option<ValueRef>) {
let l = bcx.ccx().layout_of(t);
pub fn trans_switch<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
t: Ty<'tcx>,
scrutinee: ValueRef,
range_assert: bool
) -> (BranchKind, Option<ValueRef>) {
let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum { .. } | layout::General { .. } |
layout::RawNullablePointer { .. } | layout::StructWrappedNullablePointer { .. } => {
......@@ -331,34 +330,37 @@ pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
}
/// Obtain the actual discriminant of a value.
pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
scrutinee: ValueRef, cast_to: Option<Type>,
range_assert: bool)
-> ValueRef {
pub fn trans_get_discr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
t: Ty<'tcx>,
scrutinee: ValueRef,
cast_to: Option<Type>,
range_assert: bool
) -> ValueRef {
let (def, substs) = match t.sty {
ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs),
_ => bug!("{} is not an enum", t)
};
debug!("trans_get_discr t: {:?}", t);
let l = bcx.ccx().layout_of(t);
let l = bcx.ccx.layout_of(t);
let val = match *l {
layout::CEnum { discr, min, max, .. } => {
load_discr(bcx, discr, scrutinee, min, max, range_assert)
}
layout::General { discr, .. } => {
let ptr = StructGEP(bcx, scrutinee, 0);
let ptr = bcx.struct_gep(scrutinee, 0);
load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1,
range_assert)
}
layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx(), 0),
layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
layout::RawNullablePointer { nndiscr, .. } => {
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
let llptrty = type_of::sizing_type_of(bcx.ccx(),
monomorphize::field_ty(bcx.ccx().tcx(), substs,
let llptrty = type_of::sizing_type_of(bcx.ccx,
monomorphize::field_ty(bcx.ccx.tcx(), substs,
&def.variants[nndiscr as usize].fields[0]));
ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty), DebugLoc::None)
bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
......@@ -367,24 +369,28 @@ pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
};
match cast_to {
None => val,
Some(llty) => if is_discr_signed(&l) { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) }
Some(llty) => if is_discr_signed(&l) { bcx.sext(val, llty) } else { bcx.zext(val, llty) }
}
}
fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: u64, discrfield: &layout::FieldPath,
scrutinee: ValueRef) -> ValueRef {
let llptrptr = GEPi(bcx, scrutinee,
fn struct_wrapped_nullable_bitdiscr(
bcx: &BlockAndBuilder,
nndiscr: u64,
discrfield: &layout::FieldPath,
scrutinee: ValueRef
) -> ValueRef {
let llptrptr = bcx.gepi(scrutinee,
&discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>()[..]);
let llptr = Load(bcx, llptrptr);
let llptr = bcx.load(llptrptr);
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)), DebugLoc::None)
bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
}
/// Helper for cases where the discriminant is simply loaded.
fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
fn load_discr(bcx: &BlockAndBuilder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
range_assert: bool)
-> ValueRef {
let llty = Type::from_integer(bcx.ccx(), ity);
let llty = Type::from_integer(bcx.ccx, ity);
assert_eq!(val_ty(ptr), llty.ptr_to());
let bits = ity.size().bits();
assert!(bits <= 64);
......@@ -397,11 +403,11 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
// type, which is pointless).
Load(bcx, ptr)
bcx.load(ptr)
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
LoadRangeAssert(bcx, ptr, min, max.wrapping_add(1), /* signed: */ True)
bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True)
}
}
......@@ -409,18 +415,17 @@ fn load_discr(bcx: Block, ity: layout::Integer, ptr: ValueRef, min: u64, max: u6
/// discriminant-like value returned by `trans_switch`.
///
/// This should ideally be less tightly tied to `_match`.
pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
-> ValueRef {
let l = bcx.ccx().layout_of(t);
pub fn trans_case<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, value: Disr) -> ValueRef {
let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum { discr, .. }
| layout::General { discr, .. }=> {
C_integral(Type::from_integer(bcx.ccx(), discr), value.0, true)
C_integral(Type::from_integer(bcx.ccx, discr), value.0, true)
}
layout::RawNullablePointer { .. } |
layout::StructWrappedNullablePointer { .. } => {
assert!(value == Disr(0) || value == Disr(1));
C_bool(bcx.ccx(), value != Disr(0))
C_bool(bcx.ccx, value != Disr(0))
}
_ => {
bug!("{} does not have a discriminant. Represented as {:#?}", t, l);
......@@ -430,18 +435,19 @@ pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, value: Disr)
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
val: ValueRef, to: Disr) {
let l = bcx.ccx().layout_of(t);
pub fn trans_set_discr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: Disr
) {
let l = bcx.ccx.layout_of(t);
match *l {
layout::CEnum{ discr, min, max, .. } => {
assert_discr_in_range(Disr(min), Disr(max), to);
Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
val);
}
layout::General{ discr, .. } => {
Store(bcx, C_integral(Type::from_integer(bcx.ccx(), discr), to.0, true),
StructGEP(bcx, val, 0));
bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
bcx.struct_gep(val, 0));
}
layout::Univariant { .. }
| layout::UntaggedUnion { .. }
......@@ -449,10 +455,10 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
assert_eq!(to, Disr(0));
}
layout::RawNullablePointer { nndiscr, .. } => {
let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0];
let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
if to.0 != nndiscr {
let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
Store(bcx, C_null(llptrty), val);
let llptrty = type_of::sizing_type_of(bcx.ccx, nnty);
bcx.store(C_null(llptrty), val);
}
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
......@@ -461,17 +467,16 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
// Issue #34427: As workaround for LLVM bug on
// ARM, use memset of 0 on whole struct rather
// than storing null to single target field.
let b = B(bcx);
let llptr = b.pointercast(val, Type::i8(b.ccx).ptr_to());
let fill_byte = C_u8(b.ccx, 0);
let size = C_uint(b.ccx, nonnull.stride().bytes());
let align = C_i32(b.ccx, nonnull.align.abi() as i32);
base::call_memset(&b, llptr, fill_byte, size, align, false);
let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to());
let fill_byte = C_u8(bcx.ccx, 0);
let size = C_uint(bcx.ccx, nonnull.stride().bytes());
let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
let llptrptr = GEPi(bcx, val, &path[..]);
let llptrptr = bcx.gepi(val, &path[..]);
let llptrty = val_ty(llptrptr).element_type();
Store(bcx, C_null(llptrty), llptrptr);
bcx.store(C_null(llptrty), llptrptr);
}
}
}
......@@ -479,7 +484,7 @@ pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
}
}
fn target_sets_discr_via_memset<'blk, 'tcx>(bcx: Block<'blk, 'tcx>) -> bool {
fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>) -> bool {
bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
}
......@@ -492,19 +497,15 @@ fn assert_discr_in_range(min: Disr, max: Disr, discr: Disr) {
}
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>,
val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
trans_field_ptr_builder(&bcx.build(), t, val, discr, ix)
}
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>,
val: MaybeSizedValue,
discr: Disr, ix: usize)
-> ValueRef {
let l = bcx.ccx().layout_of(t);
debug!("trans_field_ptr_builder on {} represented as {:#?}", t, l);
pub fn trans_field_ptr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
t: Ty<'tcx>,
val: MaybeSizedValue,
discr: Disr,
ix: usize
) -> ValueRef {
let l = bcx.ccx.layout_of(t);
debug!("trans_field_ptr on {} represented as {:#?}", t, l);
// Note: if this ever needs to generate conditionals (e.g., if we
// decide to do some kind of cdr-coding-like non-unique repr
// someday), it will need to return a possibly-new bcx as well.
......@@ -512,7 +513,7 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
layout::Univariant { ref variant, .. } => {
assert_eq!(discr, Disr(0));
struct_field_ptr(bcx, &variant,
&compute_fields(bcx.ccx(), t, 0, false),
&compute_fields(bcx.ccx, t, 0, false),
val, ix, false)
}
layout::Vector { count, .. } => {
......@@ -521,57 +522,53 @@ pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
bcx.struct_gep(val.value, ix)
}
layout::General { discr: d, ref variants, .. } => {
let mut fields = compute_fields(bcx.ccx(), t, discr.0 as usize, false);
fields.insert(0, d.to_ty(&bcx.ccx().tcx(), false));
let mut fields = compute_fields(bcx.ccx, t, discr.0 as usize, false);
fields.insert(0, d.to_ty(&bcx.ccx.tcx(), false));
struct_field_ptr(bcx, &variants[discr.0 as usize],
&fields,
val, ix + 1, true)
}
layout::UntaggedUnion { .. } => {
let fields = compute_fields(bcx.ccx(), t, 0, false);
let ty = type_of::in_memory_type_of(bcx.ccx(), fields[ix]);
if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
let fields = compute_fields(bcx.ccx, t, 0, false);
let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
bcx.pointercast(val.value, ty.ptr_to())
}
layout::RawNullablePointer { nndiscr, .. } |
layout::StructWrappedNullablePointer { nndiscr, .. } if discr.0 != nndiscr => {
let nullfields = compute_fields(bcx.ccx(), t, (1-nndiscr) as usize, false);
let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false);
// The unit-like case might have a nonzero number of unit-like fields.
// (e.g., Result or Either with (), as one side.)
let ty = type_of::type_of(bcx.ccx(), nullfields[ix]);
assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
// The contents of memory at this pointer can't matter, but use
// the value that's "reasonable" in case of pointer comparison.
if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
bcx.pointercast(val.value, ty.ptr_to())
}
layout::RawNullablePointer { nndiscr, .. } => {
let nnty = compute_fields(bcx.ccx(), t, nndiscr as usize, false)[0];
let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
assert_eq!(ix, 0);
assert_eq!(discr.0, nndiscr);
let ty = type_of::type_of(bcx.ccx(), nnty);
if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
let ty = type_of::type_of(bcx.ccx, nnty);
bcx.pointercast(val.value, ty.ptr_to())
}
layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
assert_eq!(discr.0, nndiscr);
struct_field_ptr(bcx, &nonnull,
&compute_fields(bcx.ccx(), t, discr.0 as usize, false),
&compute_fields(bcx.ccx, t, discr.0 as usize, false),
val, ix, false)
}
_ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
}
}
fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
st: &layout::Struct, fields: &Vec<Ty<'tcx>>, val: MaybeSizedValue,
ix: usize, needs_cast: bool) -> ValueRef {
fn struct_field_ptr<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
st: &layout::Struct,
fields: &Vec<Ty<'tcx>>,
val: MaybeSizedValue,
ix: usize,
needs_cast: bool
) -> ValueRef {
let fty = fields[ix];
let ccx = bcx.ccx();
let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
if bcx.is_unreachable() {
return C_undef(ll_fty.ptr_to());
}
let ccx = bcx.ccx;
let ptr_val = if needs_cast {
let fields = st.field_index_by_increasing_offset().map(|i| {
......@@ -587,7 +584,8 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// * First field - Always aligned properly
// * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || type_is_sized(bcx.tcx(), fty) {
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
bcx.ccx.shared().type_is_sized(fty) {
return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
}
......@@ -607,8 +605,6 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
return bcx.struct_gep(ptr_val, ix);
}
let dbloc = DebugLoc::None;
// We need to get the pointer manually now.
// We do this by casting to a *i8, then offsetting it by the appropriate amount.
// We do this instead of, say, simply adjusting the pointer from the result of a GEP
......@@ -628,7 +624,7 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
let offset = st.offsets[ix].bytes();
let unaligned_offset = C_uint(bcx.ccx(), offset);
let unaligned_offset = C_uint(bcx.ccx, offset);
// Get the alignment of the field
let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
......@@ -639,19 +635,18 @@ fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
// (unaligned offset + (align - 1)) & -align
// Calculate offset
dbloc.apply(bcx.fcx());
let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64));
let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64));
let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
bcx.neg(align));
debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
// Cast and adjust pointer
let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx()));
let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
let byte_ptr = bcx.gep(byte_ptr, &[offset]);
// Finally, cast back to the type expected
let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
bcx.pointercast(byte_ptr, ll_fty.ptr_to())
}
......
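The DST branch of `struct_field_ptr` above rounds a field offset up to the field's dynamic alignment with `(unaligned offset + (align - 1)) & -align`. A self-contained check of that formula, using a hypothetical helper name and assuming `align` is a power of two (as LLVM alignments are):

```rust
fn round_up_to_align(offset: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    // (offset + (align - 1)) & -align, with two's-complement negation.
    (offset + (align - 1)) & align.wrapping_neg()
}

fn main() {
    assert_eq!(round_up_to_align(0, 8), 0);
    assert_eq!(round_up_to_align(1, 8), 8);
    assert_eq!(round_up_to_align(13, 4), 16);
}
```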
......@@ -12,7 +12,6 @@
use llvm::{self, ValueRef};
use base;
use build::*;
use common::*;
use type_of;
use type_::Type;
......@@ -25,10 +24,12 @@
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<(ValueRef, Ty<'tcx>)>,
mut inputs: Vec<ValueRef>) {
pub fn trans_inline_asm<'a, 'tcx>(
bcx: &BlockAndBuilder<'a, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<(ValueRef, Ty<'tcx>)>,
mut inputs: Vec<ValueRef>
) {
let mut ext_constraints = vec![];
let mut output_types = vec![];
......@@ -47,7 +48,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if out.is_indirect {
indirect_outputs.push(val.unwrap());
} else {
output_types.push(type_of::type_of(bcx.ccx(), ty));
output_types.push(type_of::type_of(bcx.ccx, ty));
}
}
if !indirect_outputs.is_empty() {
......@@ -78,9 +79,9 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
0 => Type::void(bcx.ccx()),
0 => Type::void(bcx.ccx),
1 => output_types[0],
_ => Type::struct_(bcx.ccx(), &output_types[..], false)
_ => Type::struct_(bcx.ccx, &output_types[..], false)
};
let dialect = match ia.dialect {
......@@ -90,32 +91,33 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
let constraint_cstr = CString::new(all_constraints).unwrap();
let r = InlineAsmCall(bcx,
asm.as_ptr(),
constraint_cstr.as_ptr(),
&inputs,
output_type,
ia.volatile,
ia.alignstack,
dialect);
let r = bcx.inline_asm_call(
asm.as_ptr(),
constraint_cstr.as_ptr(),
&inputs,
output_type,
ia.volatile,
ia.alignstack,
dialect
);
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &(val, _))) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
Store(bcx, v, val);
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
bcx.store(v, val);
}
// Store expn_id in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx().llcx(),
let kind = llvm::LLVMGetMDKindIDInContext(bcx.ccx.llcx(),
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: llvm::ValueRef = C_i32(bcx.ccx(), ia.expn_id.into_u32() as i32);
let val: llvm::ValueRef = C_i32(bcx.ccx, ia.expn_id.into_u32() as i32);
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
llvm::LLVMMDNodeInContext(bcx.ccx.llcx(), &val, 1));
}
}
(Diff collapsed.)
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm;
use llvm::BasicBlockRef;
use value::{Users, Value};
use std::iter::{Filter, Map};
#[derive(Copy, Clone)]
pub struct BasicBlock(pub BasicBlockRef);
pub type Preds = Map<Filter<Users, fn(&Value) -> bool>, fn(Value) -> BasicBlock>;
/// Wrapper for LLVM BasicBlockRef
impl BasicBlock {
pub fn get(&self) -> BasicBlockRef {
let BasicBlock(v) = *self; v
}
pub fn as_value(self) -> Value {
unsafe {
Value(llvm::LLVMBasicBlockAsValue(self.get()))
}
}
pub fn pred_iter(self) -> Preds {
fn is_a_terminator_inst(user: &Value) -> bool { user.is_a_terminator_inst() }
let is_a_terminator_inst: fn(&Value) -> bool = is_a_terminator_inst;
fn get_parent(user: Value) -> BasicBlock { user.get_parent().unwrap() }
let get_parent: fn(Value) -> BasicBlock = get_parent;
self.as_value().user_iter()
.filter(is_a_terminator_inst)
.map(get_parent)
}
pub fn get_single_predecessor(self) -> Option<BasicBlock> {
let mut iter = self.pred_iter();
match (iter.next(), iter.next()) {
(Some(first), None) => Some(first),
_ => None
}
}
pub fn delete(self) {
unsafe {
llvm::LLVMDeleteBasicBlock(self.0);
}
}
}
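
`get_single_predecessor` above uses a compact iterator idiom: pull two elements and require exactly one. The same pattern works for any iterator; a hypothetical, self-contained `exactly_one` helper (not part of this codebase) shows it:

```rust
fn exactly_one<I: Iterator>(mut iter: I) -> Option<I::Item> {
    match (iter.next(), iter.next()) {
        (Some(first), None) => Some(first),
        _ => None, // zero elements, or more than one
    }
}

fn main() {
    assert_eq!(exactly_one([7].into_iter()), Some(7));
    assert_eq!(exactly_one([1, 2].into_iter()), None);
    assert_eq!(exactly_one(std::iter::empty::<i32>()), None);
}
```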
(Diff collapsed.)
......@@ -14,12 +14,10 @@
use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
use base;
use common::*;
use machine::llalign_of_pref;
use type_::Type;
use value::Value;
use util::nodemap::FxHashMap;
use libc::{c_uint, c_char};
use std::borrow::Cow;
......@@ -32,65 +30,40 @@ pub struct Builder<'a, 'tcx: 'a> {
pub ccx: &'a CrateContext<'a, 'tcx>,
}
impl<'a, 'tcx> Drop for Builder<'a, 'tcx> {
fn drop(&mut self) {
unsafe {
llvm::LLVMDisposeBuilder(self.llbuilder);
}
}
}
// This is a really awful way to get a zero-length c-string, but better (and a
// lot more efficient) than doing str::as_c_str("", ...) every time.
pub fn noname() -> *const c_char {
fn noname() -> *const c_char {
static CNULL: c_char = 0;
&CNULL
}
impl<'a, 'tcx> Builder<'a, 'tcx> {
pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> {
pub fn with_ccx(ccx: &'a CrateContext<'a, 'tcx>) -> Self {
// Create a fresh builder from the crate context.
let llbuilder = unsafe {
llvm::LLVMCreateBuilderInContext(ccx.llcx())
};
Builder {
llbuilder: ccx.raw_builder(),
llbuilder: llbuilder,
ccx: ccx,
}
}
pub fn count_insn(&self, category: &str) {
fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
self.ccx.stats().n_llvm_insns.set(self.ccx
.stats()
.n_llvm_insns
.get() + 1);
self.ccx.stats().n_llvm_insns.set(self.ccx.stats().n_llvm_insns.get() + 1);
}
self.ccx.count_llvm_insn();
if self.ccx.sess().count_llvm_insns() {
base::with_insn_ctxt(|v| {
let mut h = self.ccx.stats().llvm_insns.borrow_mut();
// Build version of path with cycles removed.
// Pass 1: scan table mapping str -> rightmost pos.
let mut mm = FxHashMap();
let len = v.len();
let mut i = 0;
while i < len {
mm.insert(v[i], i);
i += 1;
}
// Pass 2: concat strings for each elt, skipping
// forwards over any cycles by advancing to rightmost
// occurrence of each element in path.
let mut s = String::from(".");
i = 0;
while i < len {
i = mm[v[i]];
s.push('/');
s.push_str(v[i]);
i += 1;
}
s.push('/');
s.push_str(category);
let n = match h.get(&s) {
Some(&n) => n,
_ => 0
};
h.insert(s, n+1);
})
let mut h = self.ccx.stats().llvm_insns.borrow_mut();
*h.entry(category.to_string()).or_insert(0) += 1;
}
}
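
The rewritten `count_insn` replaces the old path-string machinery with the standard `HashMap` entry-API counter, `*h.entry(category.to_string()).or_insert(0) += 1`. A self-contained sketch of that pattern:

```rust
use std::collections::HashMap;

fn main() {
    let mut h: HashMap<String, usize> = HashMap::new();
    for category in ["load", "store", "load"] {
        // Insert 0 on first sight of the key, then bump the count.
        *h.entry(category.to_string()).or_insert(0) += 1;
    }
    assert_eq!(h["load"], 2);
    assert_eq!(h["store"], 1);
}
```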
......@@ -462,7 +435,7 @@ pub fn not(&self, v: ValueRef) -> ValueRef {
}
}
pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
self.count_insn("alloca");
unsafe {
if name.is_empty() {
......@@ -1103,6 +1076,20 @@ pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) {
}
}
pub fn add_case(&self, s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
unsafe {
if llvm::LLVMIsUndef(s) == llvm::True { return; }
llvm::LLVMAddCase(s, on_val, dest)
}
}
pub fn add_incoming_to_phi(&self, phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
unsafe {
if llvm::LLVMIsUndef(phi) == llvm::True { return; }
llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
}
}
/// Returns the ptr value that should be used for storing `val`.
fn check_store<'b>(&self,
val: ValueRef,
......
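The new `add_case` and `add_incoming_to_phi` above silently skip the mutation when the instruction being extended is LLVM `undef`, presumably the value handed out for positions already known to be unreachable (compare the `C_undef` returns removed elsewhere in this diff). A stand-in model of that guard (illustrative types, not the real LLVM bindings):

```rust
enum SwitchVal {
    Undef,                            // produced in unreachable positions
    Switch(Vec<(u64, &'static str)>), // (case value, destination block)
}

fn add_case(s: &mut SwitchVal, on_val: u64, dest: &'static str) {
    // Mirrors the `LLVMIsUndef(s) == True` early return in the builder.
    if let SwitchVal::Switch(cases) = s {
        cases.push((on_val, dest));
    }
}

fn main() {
    let mut live = SwitchVal::Switch(vec![]);
    add_case(&mut live, 0, "bb1");
    let mut dead = SwitchVal::Undef;
    add_case(&mut dead, 0, "bb1"); // silently dropped, no crash
}
```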
......@@ -8,8 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
use abi::{self, align_up_to, FnType, ArgType};
use context::CrateContext;
......
......@@ -16,7 +16,6 @@
pub use self::CalleeData::*;
use arena::TypedArena;
use llvm::{self, ValueRef, get_params};
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
......@@ -25,10 +24,10 @@
use attributes;
use base;
use base::*;
use build::*;
use common::{self, Block, Result, CrateContext, FunctionContext, SharedCrateContext};
use common::{
self, CrateContext, FunctionContext, SharedCrateContext
};
use consts;
use debuginfo::DebugLoc;
use declare;
use value::Value;
use meth;
......@@ -71,25 +70,8 @@ pub fn ptr(llfn: ValueRef, ty: Ty<'tcx>) -> Callee<'tcx> {
}
}
/// Trait or impl method call.
pub fn method_call<'blk>(bcx: Block<'blk, 'tcx>,
method_call: ty::MethodCall)
-> Callee<'tcx> {
let method = bcx.tcx().tables().method_map[&method_call];
Callee::method(bcx, method)
}
/// Trait or impl method.
pub fn method<'blk>(bcx: Block<'blk, 'tcx>,
method: ty::MethodCallee<'tcx>) -> Callee<'tcx> {
let substs = bcx.fcx.monomorphize(&method.substs);
Callee::def(bcx.ccx(), method.def_id, substs)
}
/// Function or method definition.
pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>)
pub fn def<'a>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>)
-> Callee<'tcx> {
let tcx = ccx.tcx();
......@@ -196,25 +178,6 @@ pub fn direct_fn_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
fn_ty
}
/// This behemoth of a function translates function calls. Unfortunately, in
/// order to generate more efficient LLVM output at -O0, it has quite a complex
/// signature (refactoring this into two functions seems like a good idea).
///
/// In particular, for lang items, it is invoked with a dest of None, and in
/// that case the return value contains the result of the fn. The lang item must
/// not return a structural type or else all heck breaks loose.
///
/// For non-lang items, `dest` is always Some, and hence the result is written
/// into memory somewhere. Nonetheless we return the actual return value of the
/// function.
pub fn call<'a, 'blk>(self, bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
args: &[ValueRef],
dest: Option<ValueRef>)
-> Result<'blk, 'tcx> {
trans_call_inner(bcx, debug_loc, self, args, dest)
}
/// Turn the callee into a function pointer.
pub fn reify<'a>(self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
match self.data {
......@@ -267,8 +230,6 @@ fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
// then adapt the self type
let llfn_closure_kind = ccx.tcx().closure_kind(def_id);
let _icx = push_ctxt("trans_closure_adapter_shim");
debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \
trait_closure_kind={:?}, llfn={:?})",
llfn_closure_kind, trait_closure_kind, Value(llfn));
......@@ -367,23 +328,28 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty);
attributes::set_frame_pointer_elimination(ccx, lloncefn);
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, &block_arena);
let mut bcx = fcx.init(false);
let orig_fn_ty = fn_ty;
let fcx = FunctionContext::new(ccx, lloncefn);
let mut bcx = fcx.get_entry_block();
let callee = Callee {
data: Fn(llreffn),
ty: llref_fn_ty
};
// the first argument (`self`) will be the (by value) closure env.
let mut llargs = get_params(fcx.llfn);
let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
let env_arg = &fcx.fn_ty.args[0];
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let self_idx = fn_ty.ret.is_indirect() as usize;
let env_arg = &orig_fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
llargs[self_idx]
} else {
let scratch = alloc_ty(bcx, closure_ty, "self");
let scratch = alloc_ty(&bcx, closure_ty, "self");
let mut llarg_idx = self_idx;
env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, scratch);
env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
scratch
};
......@@ -391,33 +357,37 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
// Adjust llargs such that llargs[self_idx..] has the call arguments.
// For zero-sized closures that means sneaking in a new argument.
if env_arg.is_ignore() {
if self_idx > 0 {
self_idx -= 1;
llargs[self_idx] = llenv;
} else {
llargs.insert(0, llenv);
}
llargs.insert(self_idx, llenv);
} else {
llargs[self_idx] = llenv;
}
let dest = fcx.llretslotptr.get();
let callee = Callee {
data: Fn(llreffn),
ty: llref_fn_ty
};
// Call the by-ref closure body with `self` in a cleanup scope,
// to drop `self` when the body returns, or in case it unwinds.
let self_scope = fcx.push_custom_cleanup_scope();
fcx.schedule_drop_mem(self_scope, llenv, closure_ty);
bcx = callee.call(bcx, DebugLoc::None, &llargs[self_idx..], dest).bcx;
let self_scope = fcx.schedule_drop_mem(llenv, closure_ty);
let llfn = callee.reify(bcx.ccx);
let llret;
if let Some(landing_pad) = self_scope.landing_pad {
let normal_bcx = bcx.fcx().build_new_block("normal-return");
llret = bcx.invoke(llfn, &llargs[..], normal_bcx.llbb(), landing_pad, None);
bcx = normal_bcx;
} else {
llret = bcx.call(llfn, &llargs[..], None);
}
fn_ty.apply_attrs_callsite(llret);
fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
if fn_ret.0.is_never() {
bcx.unreachable();
} else {
self_scope.trans(&bcx);
fcx.finish(bcx, DebugLoc::None);
if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
bcx.ret_void();
} else {
bcx.ret(llret);
}
}
ccx.instances().borrow_mut().insert(method_instance, lloncefn);
......@@ -443,7 +413,6 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
bare_fn_ty: Ty<'tcx>)
-> ValueRef
{
let _icx = push_ctxt("trans_fn_pointer_shim");
let tcx = ccx.tcx();
// Normalize the type for better caching.
......@@ -519,32 +488,39 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty);
attributes::set_frame_pointer_elimination(ccx, llfn);
//
let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
let mut bcx = fcx.init(false);
let fcx = FunctionContext::new(ccx, llfn);
let bcx = fcx.get_entry_block();
let llargs = get_params(fcx.llfn);
let mut llargs = get_params(fcx.llfn);
let self_idx = fcx.fn_ty.ret.is_indirect() as usize;
let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize);
let llfnpointer = llfnpointer.unwrap_or_else(|| {
// the first argument (`self`) will be ptr to the fn pointer
if is_by_ref {
Load(bcx, llargs[self_idx])
bcx.load(self_arg)
} else {
llargs[self_idx]
self_arg
}
});
let dest = fcx.llretslotptr.get();
let callee = Callee {
data: Fn(llfnpointer),
ty: bare_fn_ty
};
bcx = callee.call(bcx, DebugLoc::None, &llargs[(self_idx + 1)..], dest).bcx;
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(ccx, &[]);
let llret = bcx.call(llfnpointer, &llargs, None);
fn_ty.apply_attrs_callsite(llret);
fcx.finish(bcx, DebugLoc::None);
if fn_ret.0.is_never() {
bcx.unreachable();
} else {
if fn_ty.ret.is_indirect() || fn_ty.ret.is_ignore() {
bcx.ret_void();
} else {
bcx.ret(llret);
}
}
ccx.fn_pointer_shims().borrow_mut().insert(bare_fn_ty_maybe_ref, llfn);
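
Both shims now end with the same return-emission decision: a diverging callee gets `unreachable`, an indirect or ignored return gets `ret void`, and anything else returns the call result directly. A compact model of that branch (stand-in enum, not rustc's `FnType`):

```rust
enum RetStyle { Never, Indirect, Ignore, Direct }

fn emit_ret(ret: RetStyle) -> &'static str {
    match ret {
        RetStyle::Never => "unreachable", // fn_ret.0.is_never()
        RetStyle::Indirect | RetStyle::Ignore => "ret void",
        RetStyle::Direct => "ret %llret",
    }
}

fn main() {
    assert_eq!(emit_ret(RetStyle::Never), "unreachable");
    assert_eq!(emit_ret(RetStyle::Indirect), "ret void");
    assert_eq!(emit_ret(RetStyle::Direct), "ret %llret");
}
```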
......@@ -649,87 +625,3 @@ fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
(llfn, fn_ty)
}
// ______________________________________________________________________
// Translating calls
fn trans_call_inner<'a, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
debug_loc: DebugLoc,
callee: Callee<'tcx>,
args: &[ValueRef],
opt_llretslot: Option<ValueRef>)
-> Result<'blk, 'tcx> {
// Introduce a temporary cleanup scope that will contain cleanups
// for the arguments while they are being evaluated. The purpose
// this cleanup is to ensure that, should a panic occur while
// evaluating argument N, the values for arguments 0...N-1 are all
// cleaned up. If no panic occurs, the values are handed off to
// the callee, and hence none of the cleanups in this temporary
// scope will ever execute.
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(ccx, &[]);
let mut callee = match callee.data {
NamedTupleConstructor(_) | Intrinsic => {
bug!("{:?} calls should not go through Callee::call", callee);
}
f => f
};
// If there no destination, return must be direct, with no cast.
if opt_llretslot.is_none() {
assert!(!fn_ty.ret.is_indirect() && fn_ty.ret.cast.is_none());
}
let mut llargs = Vec::new();
if fn_ty.ret.is_indirect() {
let mut llretslot = opt_llretslot.unwrap();
if let Some(ty) = fn_ty.ret.cast {
llretslot = PointerCast(bcx, llretslot, ty.ptr_to());
}
llargs.push(llretslot);
}
match callee {
Virtual(idx) => {
llargs.push(args[0]);
let fn_ptr = meth::get_virtual_method(bcx, args[1], idx);
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
callee = Fn(PointerCast(bcx, fn_ptr, llty));
llargs.extend_from_slice(&args[2..]);
}
_ => llargs.extend_from_slice(args)
}
let llfn = match callee {
Fn(f) => f,
_ => bug!("expected fn pointer callee, found {:?}", callee)
};
let (llret, bcx) = base::invoke(bcx, llfn, &llargs, debug_loc);
if !bcx.unreachable.get() {
fn_ty.apply_attrs_callsite(llret);
// If the function we just called does not use an outpointer,
// store the result into the rust outpointer. Cast the outpointer
// type to match because some ABIs will use a different type than
// the Rust type. e.g., a {u32,u32} struct could be returned as
// u64.
if !fn_ty.ret.is_indirect() {
if let Some(llretslot) = opt_llretslot {
fn_ty.ret.store(&bcx.build(), llret, llretslot);
}
}
}
if fn_ret.0.is_never() {
Unreachable(bcx);
}
Result::new(bcx, llret)
}
(Diff collapsed.)
......@@ -208,7 +208,7 @@
use syntax_pos::DUMMY_SP;
use base::custom_coerce_unsize_info;
use context::SharedCrateContext;
use common::{fulfill_obligation, type_is_sized};
use common::fulfill_obligation;
use glue::{self, DropGlueKind};
use monomorphize::{self, Instance};
use util::nodemap::{FxHashSet, FxHashMap, DefIdMap};
......@@ -337,7 +337,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(scx: &SharedCrateContext<'a, 'tcx>,
TransItem::Static(node_id) => {
let def_id = scx.tcx().map.local_def_id(node_id);
let ty = scx.tcx().item_type(def_id);
let ty = glue::get_drop_glue_type(scx.tcx(), ty);
let ty = glue::get_drop_glue_type(scx, ty);
neighbors.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
recursion_depth_reset = None;
......@@ -542,7 +542,7 @@ fn visit_lvalue(&mut self,
self.param_substs,
&ty);
assert!(ty.is_normalized_for_trans());
let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
......@@ -678,7 +678,7 @@ fn visit_terminator_kind(&mut self,
let operand_ty = monomorphize::apply_param_substs(self.scx,
self.param_substs,
&mt.ty);
let ty = glue::get_drop_glue_type(tcx, operand_ty);
let ty = glue::get_drop_glue_type(self.scx, operand_ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
} else {
bug!("Has the drop_in_place() intrinsic's signature changed?")
......@@ -804,17 +804,17 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
let field_type = monomorphize::apply_param_substs(scx,
substs,
&field_type);
let field_type = glue::get_drop_glue_type(scx.tcx(), field_type);
let field_type = glue::get_drop_glue_type(scx, field_type);
if glue::type_needs_drop(scx.tcx(), field_type) {
if scx.type_needs_drop(field_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(field_type)));
}
}
}
ty::TyClosure(def_id, substs) => {
for upvar_ty in substs.upvar_tys(def_id, scx.tcx()) {
let upvar_ty = glue::get_drop_glue_type(scx.tcx(), upvar_ty);
if glue::type_needs_drop(scx.tcx(), upvar_ty) {
let upvar_ty = glue::get_drop_glue_type(scx, upvar_ty);
if scx.type_needs_drop(upvar_ty) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(upvar_ty)));
}
}
......@@ -822,15 +822,15 @@ fn find_drop_glue_neighbors<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
ty::TyBox(inner_type) |
ty::TySlice(inner_type) |
ty::TyArray(inner_type, _) => {
let inner_type = glue::get_drop_glue_type(scx.tcx(), inner_type);
if glue::type_needs_drop(scx.tcx(), inner_type) {
let inner_type = glue::get_drop_glue_type(scx, inner_type);
if scx.type_needs_drop(inner_type) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(inner_type)));
}
}
ty::TyTuple(args) => {
for arg in args {
let arg = glue::get_drop_glue_type(scx.tcx(), arg);
if glue::type_needs_drop(scx.tcx(), arg) {
let arg = glue::get_drop_glue_type(scx, arg);
if scx.type_needs_drop(arg) {
output.push(TransItem::DropGlue(DropGlueKind::Ty(arg)));
}
}
......@@ -969,7 +969,7 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>,
&ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
let (inner_source, inner_target) = (a, b);
if !type_is_sized(scx.tcx(), inner_source) {
if !scx.type_is_sized(inner_source) {
(inner_source, inner_target)
} else {
scx.tcx().struct_lockstep_tails(inner_source, inner_target)
......@@ -1051,7 +1051,7 @@ fn create_trans_items_for_vtable_methods<'a, 'tcx>(scx: &SharedCrateContext<'a,
output.extend(methods);
}
// Also add the destructor
let dg_type = glue::get_drop_glue_type(scx.tcx(), impl_ty);
let dg_type = glue::get_drop_glue_type(scx, impl_ty);
output.push(TransItem::DropGlue(DropGlueKind::Ty(dg_type)));
}
}
......@@ -1097,7 +1097,7 @@ fn visit_item(&mut self, item: &'v hir::Item) {
def_id_to_string(self.scx.tcx(), def_id));
let ty = self.scx.tcx().item_type(def_id);
let ty = glue::get_drop_glue_type(self.scx.tcx(), ty);
let ty = glue::get_drop_glue_type(self.scx, ty);
self.output.push(TransItem::DropGlue(DropGlueKind::Ty(ty)));
}
}
......
(Diff collapsed.)
......@@ -16,7 +16,7 @@
use rustc::hir::def_id::DefId;
use rustc::hir::map as hir_map;
use {debuginfo, machine};
use base::{self, push_ctxt};
use base;
use trans_item::TransItem;
use common::{CrateContext, val_ty};
use declare;
......@@ -221,7 +221,6 @@ pub fn trans_static(ccx: &CrateContext,
attrs: &[ast::Attribute])
-> Result<ValueRef, ConstEvalErr> {
unsafe {
let _icx = push_ctxt("trans_static");
let def_id = ccx.tcx().map.local_def_id(id);
let g = get_static(ccx, def_id);
......
(Diff collapsed.)
......@@ -44,8 +44,8 @@ pub fn is_valid(&self) -> bool {
/// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
/// If debuginfo is disabled, the returned vector is empty.
pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec<VisibilityScope, MirDebugScope> {
let mir = fcx.mir();
pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext)
-> IndexVec<VisibilityScope, MirDebugScope> {
let null_scope = MirDebugScope {
scope_metadata: ptr::null_mut(),
file_start_pos: BytePos(0),
......@@ -53,8 +53,8 @@ pub fn create_mir_scopes(fcx: &FunctionContext) -> IndexVec<VisibilityScope, Mir
};
let mut scopes = IndexVec::from_elem(null_scope, &mir.visibility_scopes);
let fn_metadata = match fcx.debug_context {
FunctionDebugContext::RegularContext(box ref data) => data.fn_metadata,
let fn_metadata = match *debug_context {
FunctionDebugContext::RegularContext(ref data) => data.fn_metadata,
FunctionDebugContext::DebugInfoDisabled |
FunctionDebugContext::FunctionWithoutDebugInfo => {
return scopes;
......
......@@ -13,37 +13,26 @@
use llvm;
use common::{C_bytes, CrateContext, C_i32};
use builder::Builder;
use declare;
use type_::Type;
use session::config::NoDebugInfo;
use std::ffi::CString;
use std::ptr;
use syntax::attr;
/// Inserts a side-effect free instruction sequence that makes sure that the
/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext) {
pub fn insert_reference_to_gdb_debug_scripts_section_global(ccx: &CrateContext, builder: &Builder) {
if needs_gdb_debug_scripts_section(ccx) {
let empty = CString::new("").unwrap();
let gdb_debug_scripts_section_global =
get_or_insert_gdb_debug_scripts_section_global(ccx);
let gdb_debug_scripts_section_global = get_or_insert_gdb_debug_scripts_section_global(ccx);
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let indices = [C_i32(ccx, 0), C_i32(ccx, 0)];
let element = builder.inbounds_gep(gdb_debug_scripts_section_global, &indices);
let volative_load_instruction = builder.volatile_load(element);
unsafe {
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let indices = [C_i32(ccx, 0), C_i32(ccx, 0)];
let element =
llvm::LLVMBuildInBoundsGEP(ccx.raw_builder(),
gdb_debug_scripts_section_global,
indices.as_ptr(),
indices.len() as ::libc::c_uint,
empty.as_ptr());
let volative_load_instruction =
llvm::LLVMBuildLoad(ccx.raw_builder(),
element,
empty.as_ptr());
llvm::LLVMSetVolatile(volative_load_instruction, llvm::True);
llvm::LLVMSetAlignment(volative_load_instruction, 1);
}
}
......
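The gdb-scripts hunk above keeps the `.debug_gdb_scripts` global alive by emitting a volatile one-byte load of it, now through `Builder` methods rather than raw FFI calls. The same trick expressed in plain Rust with std's volatile read (a sketch of the idea, not the compiler's code):

```rust
static GDB_SCRIPTS_SECTION: [u8; 4] = *b"gdb\0";

fn force_reference() -> u8 {
    // A volatile read cannot be optimized away, so this keeps a live
    // reference to the global (and the section it lives in).
    unsafe { std::ptr::read_volatile(&GDB_SCRIPTS_SECTION[0]) }
}

fn main() {
    assert_eq!(force_reference(), b'g');
}
```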
(3 diffs collapsed.)
......@@ -36,6 +36,7 @@
#![feature(slice_patterns)]
#![feature(staged_api)]
#![feature(unicode)]
#![feature(conservative_impl_trait)]
use rustc::dep_graph::WorkProduct;
......@@ -95,8 +96,6 @@ pub mod back {
mod assert_module_sources;
mod attributes;
mod base;
mod basic_block;
mod build;
mod builder;
mod cabi_aarch64;
mod cabi_arm;
......
(9 diffs collapsed.)
......@@ -184,7 +184,7 @@ fn predefine_drop_glue(ccx: &CrateContext<'a, 'tcx>,
linkage: llvm::Linkage,
symbol_name: &str) {
let tcx = ccx.tcx();
assert_eq!(dg.ty(), glue::get_drop_glue_type(tcx, dg.ty()));
assert_eq!(dg.ty(), glue::get_drop_glue_type(ccx.shared(), dg.ty()));
let t = dg.ty();
let sig = tcx.mk_fn_sig(iter::once(tcx.mk_mut_ptr(tcx.types.i8)), tcx.mk_nil(), false);
......
(Diff collapsed.)
......@@ -8,8 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_camel_case_types)]
use abi::FnType;
use adt;
use common::*;
......@@ -41,7 +39,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
let _recursion_lock = cx.enter_type_of(t);
let llsizingty = match t.sty {
_ if !type_is_sized(cx.tcx(), t) => {
_ if !cx.shared().type_is_sized(t) => {
Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false)
}
......@@ -55,7 +53,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
if type_is_sized(cx.tcx(), ty) {
if cx.shared().type_is_sized(ty) {
Type::i8p(cx)
} else {
Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false)
......@@ -104,7 +102,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
// FIXME(eddyb) Temporary sanity check for ty::layout.
let layout = cx.layout_of(t);
if !type_is_sized(cx.tcx(), t) {
if !cx.shared().type_is_sized(t) {
if !layout.is_unsized() {
bug!("layout should be unsized for type `{}` / {:#?}",
t, layout);
......@@ -135,7 +133,7 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ->
match ty.sty {
ty::TyBox(t) |
ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !type_is_sized(ccx.tcx(), t) => {
ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if !ccx.shared().type_is_sized(t) => {
in_memory_type_of(ccx, t).ptr_to()
}
_ => bug!("expected fat ptr ty but got {:?}", ty)
......@@ -172,7 +170,7 @@ pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
/// is too large for it to be placed in SSA value (by our rules).
/// For the raw type without far pointer indirection, see `in_memory_type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
let ty = if !type_is_sized(cx.tcx(), ty) {
let ty = if !cx.shared().type_is_sized(ty) {
cx.tcx().mk_imm_ptr(ty)
} else {
ty
......@@ -232,7 +230,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
if !type_is_sized(cx.tcx(), ty) {
if !cx.shared().type_is_sized(ty) {
if let ty::TyStr = ty.sty {
// This means we get a nicer name in the output (str is always
// unsized).
......
(2 diffs collapsed.)