diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 79292d390e5d25b71059656be8383835b13a9cfe..2226cee6e3684c57048bdca16844d54c995e2030 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -151,7 +151,7 @@ fn pointer(&mut self) -> *mut T { unsafe fn finalize(b: IntermediateBox) -> Box { let p = b.ptr as *mut T; mem::forget(b); - mem::transmute(p) + Box::from_raw(p) } fn make_place() -> IntermediateBox { @@ -300,7 +300,10 @@ pub unsafe fn from_raw(raw: *mut T) -> Self { issue = "27730")] #[inline] pub unsafe fn from_unique(u: Unique) -> Self { - mem::transmute(u) + #[cfg(stage0)] + return mem::transmute(u); + #[cfg(not(stage0))] + return Box(u); } /// Consumes the `Box`, returning the wrapped raw pointer. @@ -362,7 +365,14 @@ pub fn into_raw(b: Box) -> *mut T { issue = "27730")] #[inline] pub fn into_unique(b: Box) -> Unique { - unsafe { mem::transmute(b) } + #[cfg(stage0)] + return unsafe { mem::transmute(b) }; + #[cfg(not(stage0))] + return { + let unique = b.0; + mem::forget(b); + unique + }; } } @@ -627,7 +637,7 @@ impl Box { pub fn downcast(self) -> Result, Box> { >::downcast(self).map_err(|s| unsafe { // reapply the Send marker - mem::transmute::, Box>(s) + Box::from_raw(Box::into_raw(s) as *mut (Any + Send)) }) } } diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs index 5e9019c92c5b73be37503504310f8b5eb5c04de6..b59f7480476b8ff1b193f8507502b49da5a1d2ea 100644 --- a/src/librustc/lib.rs +++ b/src/librustc/lib.rs @@ -46,11 +46,13 @@ #![feature(const_fn)] #![feature(core_intrinsics)] #![feature(drain_filter)] +#![feature(i128)] #![feature(i128_type)] -#![feature(match_default_bindings)] +#![feature(inclusive_range)] #![feature(inclusive_range_syntax)] #![cfg_attr(windows, feature(libc))] #![feature(macro_vis_matcher)] +#![feature(match_default_bindings)] #![feature(never_type)] #![feature(nonzero)] #![feature(quote)] diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs index 601e0316d4af92cfe00a0dca46551e81d74dbd08..4496e07b13814857461296395116754e3899c8b0 100644 --- a/src/librustc/lint/context.rs +++ b/src/librustc/lint/context.rs @@ -34,7 +34,8 @@ use rustc_serialize::{Decoder, Decodable, Encoder, Encodable}; use session::{config, early_error, Session}; use traits::Reveal; -use ty::{self, TyCtxt}; +use ty::{self, TyCtxt, Ty}; +use ty::layout::{LayoutError, LayoutOf, TyLayout}; use util::nodemap::FxHashMap; use std::default::Default as StdDefault; @@ -626,6 +627,14 @@ fn with_param_env(&mut self, id: ast::NodeId, f: F) } } +impl<'a, 'tcx> LayoutOf> for &'a LateContext<'a, 'tcx> { + type TyLayout = Result, LayoutError<'tcx>>; + + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + (self.tcx, self.param_env.reveal_all()).layout_of(ty) + } +} + impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> { /// Because lints are scoped lexically, we want to walk nested /// items in the context of the outer item, so enable diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs index 2c6bcc654a5327cea33496ba5c358250b45aac76..c89d67d4aab8621baf6e98fb1ce93d295d59a14c 100644 --- a/src/librustc/middle/mem_categorization.rs +++ b/src/librustc/middle/mem_categorization.rs @@ -210,7 +210,7 @@ fn resolve_field(&self, field_name: FieldName) -> Option<(&'tcx ty::AdtDef, &'tc adt_def.variant_with_id(variant_did) } _ => { - assert!(adt_def.is_univariant()); + assert_eq!(adt_def.variants.len(), 1); &adt_def.variants[0] } }; @@ -1096,7 +1096,7 @@ pub fn cat_downcast_if_needed(&self, -> cmt<'tcx> { // 
univariant enums do not need downcasts let base_did = self.tcx.parent_def_id(variant_did).unwrap(); - if !self.tcx.adt_def(base_did).is_univariant() { + if self.tcx.adt_def(base_did).variants.len() != 1 { let base_ty = base_cmt.ty; let ret = Rc::new(cmt_ { id: node.id(), diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 22a3edd200c4fd0e70b5b9265caef334ed73d248..904f9a091252255c2c339cdcd07a146db214eebc 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -41,7 +41,7 @@ use ty::RegionKind; use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; use ty::TypeVariants::*; -use ty::layout::{Layout, TargetDataLayout}; +use ty::layout::{LayoutDetails, TargetDataLayout}; use ty::maps; use ty::steal::Steal; use ty::BindingMode; @@ -78,7 +78,7 @@ /// Internal storage pub struct GlobalArenas<'tcx> { // internings - layout: TypedArena, + layout: TypedArena, // references generics: TypedArena, @@ -918,7 +918,7 @@ pub struct GlobalCtxt<'tcx> { stability_interner: RefCell>, - layout_interner: RefCell>, + layout_interner: RefCell>, /// A vector of every trait accessible in the whole crate /// (i.e. including those from subcrates). This is used only for @@ -1016,7 +1016,7 @@ pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability { interned } - pub fn intern_layout(self, layout: Layout) -> &'gcx Layout { + pub fn intern_layout(self, layout: LayoutDetails) -> &'gcx LayoutDetails { if let Some(layout) = self.layout_interner.borrow().get(&layout) { return layout; } diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 491fa2a240cce84324ee9b844ed3247d21ec9264..71bf333a8c6122cffd52110620976a3459792572 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -9,7 +9,6 @@ // except according to those terms. pub use self::Integer::*; -pub use self::Layout::*; pub use self::Primitive::*; use session::{self, DataTypeKind, Session}; @@ -21,10 +20,10 @@ use std::cmp; use std::fmt; -use std::i64; +use std::i128; use std::iter; use std::mem; -use std::ops::Deref; +use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive}; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, @@ -203,6 +202,18 @@ pub fn ptr_sized_integer(&self) -> Integer { bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits) } } + + pub fn vector_align(&self, vec_size: Size) -> Align { + for &(size, align) in &self.vector_align { + if size == vec_size { + return align; + } + } + // Default to natural alignment, which is what LLVM does. + // That is, use the size, rounded up to a power of 2. + let align = vec_size.bytes().next_power_of_two(); + Align::from_bytes(align, align).unwrap() + } } pub trait HasDataLayout: Copy { @@ -215,12 +226,6 @@ fn data_layout(&self) -> &TargetDataLayout { } } -impl<'a, 'tcx> HasDataLayout for TyCtxt<'a, 'tcx, 'tcx> { - fn data_layout(&self) -> &TargetDataLayout { - &self.data_layout - } -} - /// Endianness of the target, which must match cfg(target-endian). #[derive(Copy, Clone)] pub enum Endian { @@ -236,7 +241,8 @@ pub struct Size { impl Size { pub fn from_bits(bits: u64) -> Size { - Size::from_bytes((bits + 7) / 8) + // Avoid potential overflow from `bits + 7`. 
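+    // Splitting the division keeps the rounding behaviour: the whole bytes
+    // are counted first, and any leftover bits contribute at most one more
+    // byte, so the result still rounds up without ever exceeding u64::MAX.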
+ Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8) } pub fn from_bytes(bytes: u64) -> Size { @@ -261,6 +267,11 @@ pub fn abi_align(self, align: Align) -> Size { Size::from_bytes((self.bytes() + mask) & !mask) } + pub fn is_abi_aligned(self, align: Align) -> bool { + let mask = align.abi() - 1; + self.bytes() & mask == 0 + } + pub fn checked_add(self, offset: Size, cx: C) -> Option { let dl = cx.data_layout(); @@ -278,8 +289,6 @@ pub fn checked_add(self, offset: Size, cx: C) -> Option pub fn checked_mul(self, count: u64, cx: C) -> Option { let dl = cx.data_layout(); - // Each Size is less than dl.obj_size_bound(), so the sum is - // also less than 1 << 62 (and therefore can't overflow). match self.bytes().checked_mul(count) { Some(bytes) if bytes < dl.obj_size_bound() => { Some(Size::from_bytes(bytes)) @@ -289,6 +298,46 @@ pub fn checked_mul(self, count: u64, cx: C) -> Option { } } +// Panicking addition, subtraction and multiplication for convenience. +// Avoid during layout computation, return `LayoutError` instead. + +impl Add for Size { + type Output = Size; + fn add(self, other: Size) -> Size { + // Each Size is less than 1 << 61, so the sum is + // less than 1 << 62 (and therefore can't overflow). + Size::from_bytes(self.bytes() + other.bytes()) + } +} + +impl Sub for Size { + type Output = Size; + fn sub(self, other: Size) -> Size { + // Each Size is less than 1 << 61, so an underflow + // would result in a value larger than 1 << 61, + // which Size::from_bytes will catch for us. + Size::from_bytes(self.bytes() - other.bytes()) + } +} + +impl Mul for Size { + type Output = Size; + fn mul(self, count: u64) -> Size { + match self.bytes().checked_mul(count) { + Some(bytes) => Size::from_bytes(bytes), + None => { + bug!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count) + } + } + } +} + +impl AddAssign for Size { + fn add_assign(&mut self, other: Size) { + *self = *self + other; + } +} + /// Alignment of a type in bytes, both ABI-mandated and preferred. /// Each field is a power of two, giving the alignment a maximum /// value of 2^(2^8 - 1), which is limited by LLVM to a i32, with @@ -301,7 +350,8 @@ pub struct Align { impl Align { pub fn from_bits(abi: u64, pref: u64) -> Result { - Align::from_bytes((abi + 7) / 8, (pref + 7) / 8) + Align::from_bytes(Size::from_bits(abi).bytes(), + Size::from_bits(pref).bytes()) } pub fn from_bytes(abi: u64, pref: u64) -> Result { @@ -340,6 +390,14 @@ pub fn pref(self) -> u64 { 1 << self.pref } + pub fn abi_bits(self) -> u64 { + self.abi() * 8 + } + + pub fn pref_bits(self) -> u64 { + self.pref() * 8 + } + pub fn min(self, other: Align) -> Align { Align { abi: cmp::min(self.abi, other.abi), @@ -358,7 +416,6 @@ pub fn max(self, other: Align) -> Align { /// Integers, also used for enum discriminants. 
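As an aside (not part of the patch), the new `Size` conversions and the panicking `Add` overload are easy to model standalone; the names below are simplified stand-ins, not the actual rustc types:

```rust
// Minimal stand-in for the layout `Size` arithmetic shown above.
use std::ops::Add;

#[derive(Copy, Clone, Debug, PartialEq)]
struct Size { raw: u64 } // bytes

impl Size {
    fn from_bits(bits: u64) -> Size {
        // Round up to whole bytes without ever computing `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }
    fn from_bytes(bytes: u64) -> Size { Size { raw: bytes } }
    fn bytes(self) -> u64 { self.raw }
    /// Round up to a multiple of a power-of-two ABI alignment (in bytes).
    fn abi_align(self, align: u64) -> Size {
        let mask = align - 1;
        Size::from_bytes((self.raw + mask) & !mask)
    }
}

impl Add for Size {
    type Output = Size;
    fn add(self, other: Size) -> Size { Size::from_bytes(self.raw + other.raw) }
}

fn main() {
    assert_eq!(Size::from_bits(1).bytes(), 1);  // a single bit still occupies a byte
    assert_eq!(Size::from_bits(32).bytes(), 4);
    assert_eq!(Size::from_bytes(5).abi_align(4).bytes(), 8);
    assert_eq!((Size::from_bytes(3) + Size::from_bytes(4)).bytes(), 7);
}
```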
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] pub enum Integer { - I1, I8, I16, I32, @@ -366,10 +423,9 @@ pub enum Integer { I128, } -impl Integer { +impl<'a, 'tcx> Integer { pub fn size(&self) -> Size { match *self { - I1 => Size::from_bits(1), I8 => Size::from_bytes(1), I16 => Size::from_bytes(2), I32 => Size::from_bytes(4), @@ -382,7 +438,6 @@ pub fn align(&self, cx: C) -> Align { let dl = cx.data_layout(); match *self { - I1 => dl.i1_align, I8 => dl.i8_align, I16 => dl.i16_align, I32 => dl.i32_align, @@ -391,16 +446,13 @@ pub fn align(&self, cx: C) -> Align { } } - pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>, - signed: bool) -> Ty<'tcx> { + pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> { match (*self, signed) { - (I1, false) => tcx.types.u8, (I8, false) => tcx.types.u8, (I16, false) => tcx.types.u16, (I32, false) => tcx.types.u32, (I64, false) => tcx.types.u64, (I128, false) => tcx.types.u128, - (I1, true) => tcx.types.i8, (I8, true) => tcx.types.i8, (I16, true) => tcx.types.i16, (I32, true) => tcx.types.i32, @@ -410,9 +462,8 @@ pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>, } /// Find the smallest Integer type which can represent the signed value. - pub fn fit_signed(x: i64) -> Integer { + pub fn fit_signed(x: i128) -> Integer { match x { - -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1, -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8, -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16, -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32, @@ -422,9 +473,8 @@ pub fn fit_signed(x: i64) -> Integer { } /// Find the smallest Integer type which can represent the unsigned value. - pub fn fit_unsigned(x: u64) -> Integer { + pub fn fit_unsigned(x: u128) -> Integer { match x { - 0...0x0000_0000_0000_0001 => I1, 0...0x0000_0000_0000_00ff => I8, 0...0x0000_0000_0000_ffff => I16, 0...0x0000_0000_ffff_ffff => I32, @@ -438,8 +488,8 @@ pub fn for_abi_align(cx: C, align: Align) -> Option { let dl = cx.data_layout(); let wanted = align.abi(); - for &candidate in &[I8, I16, I32, I64] { - let ty = Int(candidate); + for &candidate in &[I8, I16, I32, I64, I128] { + let ty = Int(candidate, false); if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() { return Some(candidate); } @@ -465,19 +515,19 @@ pub fn from_attr(cx: C, ity: attr::IntType) -> Integer { /// Find the appropriate Integer type and signedness for the given /// signed discriminant range and #[repr] attribute. - /// N.B.: u64 values above i64::MAX will be treated as signed, but + /// N.B.: u128 values above i128::MAX will be treated as signed, but /// that shouldn't affect anything, other than maybe debuginfo. - fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - repr: &ReprOptions, - min: i64, - max: i64) - -> (Integer, bool) { + fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + repr: &ReprOptions, + min: i128, + max: i128) + -> (Integer, bool) { // Theoretically, negative values could be larger in unsigned representation // than the unsigned representation of the signed minimum. However, if there - // are any negative values, the only valid unsigned representation is u64 - // which can fit all i64 values, so the result remains unaffected. - let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64)); + // are any negative values, the only valid unsigned representation is u128 + // which can fit all i128 values, so the result remains unaffected. 
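A standalone sketch of the width-selection helpers above, using plain bit widths instead of the `Integer` enum; all names here are illustrative:

```rust
// Simplified model of Integer::fit_unsigned / fit_signed,
// returning a bit width instead of an enum variant.
fn fit_unsigned(x: u128) -> u32 {
    if x <= 0xff { 8 }
    else if x <= 0xffff { 16 }
    else if x <= 0xffff_ffff { 32 }
    else if x <= 0xffff_ffff_ffff_ffff { 64 }
    else { 128 }
}

fn fit_signed(x: i128) -> u32 {
    if -0x80 <= x && x <= 0x7f { 8 }
    else if -0x8000 <= x && x <= 0x7fff { 16 }
    else if -0x8000_0000 <= x && x <= 0x7fff_ffff { 32 }
    else if -0x8000_0000_0000_0000 <= x && x <= 0x7fff_ffff_ffff_ffff { 64 }
    else { 128 }
}

fn main() {
    // Discriminants 0..=255 fit in an unsigned byte...
    assert_eq!(fit_unsigned(255), 8);
    // ...but need 16 bits when treated as signed, which is why
    // `repr_discr` computes both fits before picking a representation.
    assert_eq!(fit_signed(255), 16);
    assert_eq!(fit_signed(-129), 16);
}
```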
+ let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128)); let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max)); let mut min_from_extern = None; @@ -518,22 +568,27 @@ fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, /// Fundamental unit of memory access and layout. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub enum Primitive { - Int(Integer), + /// The `bool` is the signedness of the `Integer` type. + /// + /// One would think we would not care about such details this low down, + /// but some ABIs are described in terms of C types and ISAs where the + /// integer arithmetic is done on {sign,zero}-extended registers, e.g. + /// a negative integer passed by zero-extension will appear positive in + /// the callee, and most operations on it will produce the wrong values. + Int(Integer, bool), F32, F64, Pointer } -impl Primitive { +impl<'a, 'tcx> Primitive { pub fn size(self, cx: C) -> Size { let dl = cx.data_layout(); match self { - Int(I1) | Int(I8) => Size::from_bits(8), - Int(I16) => Size::from_bits(16), - Int(I32) | F32 => Size::from_bits(32), - Int(I64) | F64 => Size::from_bits(64), - Int(I128) => Size::from_bits(128), + Int(i, _) => i.size(), + F32 => Size::from_bits(32), + F64 => Size::from_bits(64), Pointer => dl.pointer_size } } @@ -542,567 +597,228 @@ pub fn align(self, cx: C) -> Align { let dl = cx.data_layout(); match self { - Int(I1) => dl.i1_align, - Int(I8) => dl.i8_align, - Int(I16) => dl.i16_align, - Int(I32) => dl.i32_align, - Int(I64) => dl.i64_align, - Int(I128) => dl.i128_align, + Int(i, _) => i.align(dl), F32 => dl.f32_align, F64 => dl.f64_align, Pointer => dl.pointer_align } } -} - -/// Path through fields of nested structures. -// FIXME(eddyb) use small vector optimization for the common case. -pub type FieldPath = Vec; - -/// A structure, a product type in ADT terms. -#[derive(PartialEq, Eq, Hash, Debug)] -pub struct Struct { - /// Maximum alignment of fields and repr alignment. - pub align: Align, - /// Primitive alignment of fields without repr alignment. - pub primitive_align: Align, - - /// If true, no alignment padding is used. - pub packed: bool, - - /// If true, the size is exact, otherwise it's only a lower bound. - pub sized: bool, - - /// Offsets for the first byte of each field, ordered to match the source definition order. - /// This vector does not go in increasing order. - /// FIXME(eddyb) use small vector optimization for the common case. - pub offsets: Vec, - - /// Maps source order field indices to memory order indices, depending how fields were permuted. - /// FIXME (camlorn) also consider small vector optimization here. - pub memory_index: Vec, - - pub min_size: Size, + pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> { + match *self { + Int(i, signed) => i.to_ty(tcx, signed), + F32 => tcx.types.f32, + F64 => tcx.types.f64, + Pointer => tcx.mk_mut_ptr(tcx.mk_nil()), + } + } } -/// Info required to optimize struct layout. -#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)] -enum StructKind { - /// A tuple, closure, or univariant which cannot be coerced to unsized. - AlwaysSizedUnivariant, - /// A univariant, the last field of which may be coerced to unsized. - MaybeUnsizedUnivariant, - /// A univariant, but part of an enum. - EnumVariant, +/// Information about one scalar component of a Rust type. 
+#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct Scalar { + pub value: Primitive, + + /// Inclusive wrap-around range of valid values, that is, if + /// min > max, it represents min..=u128::MAX followed by 0..=max. + // FIXME(eddyb) always use the shortest range, e.g. by finding + // the largest space between two consecutive valid values and + // taking everything else as the (shortest) valid range. + pub valid_range: RangeInclusive, } -impl<'a, 'tcx> Struct { - fn new(dl: &TargetDataLayout, - fields: &Vec<&'a Layout>, - repr: &ReprOptions, - kind: StructKind, - scapegoat: Ty<'tcx>) - -> Result> { - if repr.packed() && repr.align > 0 { - bug!("Struct cannot be packed and aligned"); - } - - let align = if repr.packed() { - dl.i8_align +impl Scalar { + pub fn is_bool(&self) -> bool { + if let Int(I8, _) = self.value { + self.valid_range == (0..=1) } else { - dl.aggregate_align - }; - - let mut ret = Struct { - align, - primitive_align: align, - packed: repr.packed(), - sized: true, - offsets: vec![], - memory_index: vec![], - min_size: Size::from_bytes(0), - }; - - // Anything with repr(C) or repr(packed) doesn't optimize. - // Neither do 1-member and 2-member structs. - // In addition, code in trans assume that 2-element structs can become pairs. - // It's easier to just short-circuit here. - let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind) - && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty(); - - let (optimize, sort_ascending) = match kind { - StructKind::AlwaysSizedUnivariant => (can_optimize, false), - StructKind::MaybeUnsizedUnivariant => (can_optimize, false), - StructKind::EnumVariant => { - assert!(fields.len() >= 1, "Enum variants must have discriminants."); - (can_optimize && fields[0].size(dl).bytes() == 1, true) - } - }; - - ret.offsets = vec![Size::from_bytes(0); fields.len()]; - let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); - - if optimize { - let start = if let StructKind::EnumVariant = kind { 1 } else { 0 }; - let end = if let StructKind::MaybeUnsizedUnivariant = kind { - fields.len() - 1 - } else { - fields.len() - }; - if end > start { - let optimizing = &mut inverse_memory_index[start..end]; - if sort_ascending { - optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi()); - } else { - optimizing.sort_by(| &a, &b | { - let a = fields[a as usize].align(dl).abi(); - let b = fields[b as usize].align(dl).abi(); - b.cmp(&a) - }); - } - } + false } + } +} - // inverse_memory_index holds field indices by increasing memory offset. - // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. - // We now write field offsets to the corresponding offset slot; - // field 5 with offset 0 puts 0 in offsets[5]. - // At the bottom of this function, we use inverse_memory_index to produce memory_index. +/// The first half of a fat pointer. +/// - For a trait object, this is the address of the box. +/// - For a slice, this is the base address. +pub const FAT_PTR_ADDR: usize = 0; - if let StructKind::EnumVariant = kind { - assert_eq!(inverse_memory_index[0], 0, - "Enum variant discriminants must have the lowest offset."); - } +/// The second half of a fat pointer. +/// - For a trait object, this is the address of the vtable. +/// - For a slice, this is the length. +pub const FAT_PTR_EXTRA: usize = 1; - let mut offset = Size::from_bytes(0); +/// Describes how the fields of a type are located in memory. 
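The wrap-around convention documented for `valid_range` (if `start > end`, the range covers `start..=u128::MAX` and then `0..=end`) boils down to a small membership test; a standalone sketch with made-up function names:

```rust
// Membership test for a wrap-around inclusive range of valid values.
fn valid_range_contains(start: u128, end: u128, x: u128) -> bool {
    if start <= end {
        start <= x && x <= end        // ordinary inclusive range
    } else {
        x >= start || x <= end        // wraps through u128::MAX back to 0
    }
}

fn main() {
    // `bool`: only 0 and 1 are valid bit patterns.
    assert!(valid_range_contains(0, 1, 1));
    assert!(!valid_range_contains(0, 1, 2));

    // A non-null pointer: every value except 0 is valid.
    assert!(valid_range_contains(1, u128::MAX, 42));
    assert!(!valid_range_contains(1, u128::MAX, 0));

    // A wrapped range such as 3..=1 excludes only the value 2.
    assert!(valid_range_contains(3, 1, 0));
    assert!(!valid_range_contains(3, 1, 2));
}
```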
+#[derive(PartialEq, Eq, Hash, Debug)] +pub enum FieldPlacement { + /// All fields start at no offset. The `usize` is the field count. + Union(usize), - for i in inverse_memory_index.iter() { - let field = fields[*i as usize]; - if !ret.sized { - bug!("Struct::new: field #{} of `{}` comes after unsized field", - ret.offsets.len(), scapegoat); - } + /// Array/vector-like placement, with all fields of identical types. + Array { + stride: Size, + count: u64 + }, - if field.is_unsized() { - ret.sized = false; - } + /// Struct-like placement, with precomputed offsets. + /// + /// Fields are guaranteed to not overlap, but note that gaps + /// before, between and after all the fields are NOT always + /// padding, and as such their contents may not be discarded. + /// For example, enum variants leave a gap at the start, + /// where the discriminant field in the enum layout goes. + Arbitrary { + /// Offsets for the first byte of each field, + /// ordered to match the source definition order. + /// This vector does not go in increasing order. + // FIXME(eddyb) use small vector optimization for the common case. + offsets: Vec, + + /// Maps source order field indices to memory order indices, + /// depending how fields were permuted. + // FIXME(camlorn) also consider small vector optimization here. + memory_index: Vec + } +} - // Invariant: offset < dl.obj_size_bound() <= 1<<61 - if !ret.packed { - let align = field.align(dl); - let primitive_align = field.primitive_align(dl); - ret.align = ret.align.max(align); - ret.primitive_align = ret.primitive_align.max(primitive_align); - offset = offset.abi_align(align); +impl FieldPlacement { + pub fn count(&self) -> usize { + match *self { + FieldPlacement::Union(count) => count, + FieldPlacement::Array { count, .. } => { + let usize_count = count as usize; + assert_eq!(usize_count as u64, count); + usize_count } - - debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl)); - ret.offsets[*i as usize] = offset; - - offset = offset.checked_add(field.size(dl), dl) - .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?; + FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len() } + } - if repr.align > 0 { - let repr_align = repr.align as u64; - ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap()); - debug!("Struct::new repr_align: {:?}", repr_align); - } - - debug!("Struct::new min_size: {:?}", offset); - ret.min_size = offset; - - // As stated above, inverse_memory_index holds field indices by increasing offset. - // This makes it an already-sorted view of the offsets vec. - // To invert it, consider: - // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. - // Field 5 would be the first element, so memory_index is i: - // Note: if we didn't optimize, it's already right. - - if optimize { - ret.memory_index = vec![0; inverse_memory_index.len()]; - - for i in 0..inverse_memory_index.len() { - ret.memory_index[inverse_memory_index[i] as usize] = i as u32; + pub fn offset(&self, i: usize) -> Size { + match *self { + FieldPlacement::Union(_) => Size::from_bytes(0), + FieldPlacement::Array { stride, count } => { + let i = i as u64; + assert!(i < count); + stride * i } - } else { - ret.memory_index = inverse_memory_index; + FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i] } - - Ok(ret) - } - - /// Get the size with trailing alignment padding. 
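A compact mock of how the three placements above answer the question of where field `i` starts; this is not the rustc type, just an illustration:

```rust
// Minimal mock of the three field-placement strategies.
enum FieldPlacement {
    Union(usize),                       // every field starts at offset 0
    Array { stride: u64, count: u64 },  // field i starts at stride * i
    Arbitrary { offsets: Vec<u64> },    // precomputed per-field offsets
}

impl FieldPlacement {
    fn offset(&self, i: usize) -> u64 {
        match *self {
            FieldPlacement::Union(_) => 0,
            FieldPlacement::Array { stride, count } => {
                assert!((i as u64) < count);
                stride * i as u64
            }
            FieldPlacement::Arbitrary { ref offsets } => offsets[i],
        }
    }
}

fn main() {
    assert_eq!(FieldPlacement::Union(3).offset(2), 0);
    assert_eq!(FieldPlacement::Array { stride: 4, count: 8 }.offset(3), 12);
    assert_eq!(FieldPlacement::Arbitrary { offsets: vec![0, 8, 4] }.offset(2), 4);
}
```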
- pub fn stride(&self) -> Size { - self.min_size.abi_align(self.align) } - /// Determine whether a structure would be zero-sized, given its fields. - fn would_be_zero_sized(dl: &TargetDataLayout, fields: I) - -> Result> - where I: Iterator>> { - for field in fields { - let field = field?; - if field.is_unsized() || field.size(dl).bytes() > 0 { - return Ok(false); + pub fn memory_index(&self, i: usize) -> usize { + match *self { + FieldPlacement::Union(_) | + FieldPlacement::Array { .. } => i, + FieldPlacement::Arbitrary { ref memory_index, .. } => { + let r = memory_index[i]; + assert_eq!(r as usize as u32, r); + r as usize } } - Ok(true) } - /// Get indices of the tys that made this struct by increasing offset. + /// Get source indices of the fields by increasing offsets. #[inline] - pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator+'b { + pub fn index_by_increasing_offset<'a>(&'a self) -> impl iter::Iterator+'a { let mut inverse_small = [0u8; 64]; let mut inverse_big = vec![]; - let use_small = self.memory_index.len() <= inverse_small.len(); + let use_small = self.count() <= inverse_small.len(); // We have to write this logic twice in order to keep the array small. - if use_small { - for i in 0..self.memory_index.len() { - inverse_small[self.memory_index[i] as usize] = i as u8; - } - } else { - inverse_big = vec![0; self.memory_index.len()]; - for i in 0..self.memory_index.len() { - inverse_big[self.memory_index[i] as usize] = i as u32; - } - } - - (0..self.memory_index.len()).map(move |i| { - if use_small { inverse_small[i] as usize } - else { inverse_big[i] as usize } - }) - } - - /// Find the path leading to a non-zero leaf field, starting from - /// the given type and recursing through aggregates. - /// The tuple is `(path, source_path)`, - /// where `path` is in memory order and `source_path` in source order. - // FIXME(eddyb) track value ranges and traverse already optimized enums. - fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>) - -> Result, LayoutError<'tcx>> { - match (ty.layout(tcx, param_env)?, &ty.sty) { - (&Scalar { non_zero: true, .. }, _) | - (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))), - (&FatPointer { non_zero: true, .. }, _) => { - Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32]))) - } - - // Is this the NonZero lang item wrapping a pointer or integer type? - (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => { - let fields = &def.struct_variant().fields; - assert_eq!(fields.len(), 1); - match *fields[0].ty(tcx, substs).layout(tcx, param_env)? { - // FIXME(eddyb) also allow floating-point types here. - Scalar { value: Int(_), non_zero: false } | - Scalar { value: Pointer, non_zero: false } => { - Ok(Some((vec![0], vec![0]))) - } - FatPointer { non_zero: false, .. } => { - let tmp = vec![FAT_PTR_ADDR as u32, 0]; - Ok(Some((tmp.clone(), tmp))) - } - _ => Ok(None) - } - } - - // Perhaps one of the fields of this struct is non-zero - // let's recurse and find out - (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => { - Struct::non_zero_field_paths( - tcx, - param_env, - def.struct_variant().fields.iter().map(|field| { - field.ty(tcx, substs) - }), - Some(&variant.memory_index[..])) - } - - // Perhaps one of the upvars of this closure is non-zero - (&Univariant { ref variant, .. 
}, &ty::TyClosure(def, substs)) => { - let upvar_tys = substs.upvar_tys(def, tcx); - Struct::non_zero_field_paths( - tcx, - param_env, - upvar_tys, - Some(&variant.memory_index[..])) - } - // Can we use one of the fields in this tuple? - (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => { - Struct::non_zero_field_paths( - tcx, - param_env, - tys.iter().cloned(), - Some(&variant.memory_index[..])) - } - - // Is this a fixed-size array of something non-zero - // with at least one element? - (_, &ty::TyArray(ety, mut count)) => { - if count.has_projections() { - count = tcx.normalize_associated_type_in_env(&count, param_env); - if count.has_projections() { - return Err(LayoutError::Unknown(ty)); - } + if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self { + if use_small { + for i in 0..self.count() { + inverse_small[memory_index[i] as usize] = i as u8; } - if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 { - Struct::non_zero_field_paths( - tcx, - param_env, - Some(ety).into_iter(), - None) - } else { - Ok(None) - } - } - - (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => { - let normalized = tcx.normalize_associated_type_in_env(&ty, param_env); - if ty == normalized { - return Ok(None); + } else { + inverse_big = vec![0; self.count()]; + for i in 0..self.count() { + inverse_big[memory_index[i] as usize] = i as u32; } - return Struct::non_zero_field_in_type(tcx, param_env, normalized); } - - // Anything else is not a non-zero type. - _ => Ok(None) } - } - /// Find the path leading to a non-zero leaf field, starting from - /// the given set of fields and recursing through aggregates. - /// Returns Some((path, source_path)) on success. - /// `path` is translated to memory order. `source_path` is not. - fn non_zero_field_paths(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - fields: I, - permutation: Option<&[u32]>) - -> Result, LayoutError<'tcx>> - where I: Iterator> { - for (i, ty) in fields.enumerate() { - let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?; - if let Some((mut path, mut source_path)) = r { - source_path.push(i as u32); - let index = if let Some(p) = permutation { - p[i] as usize - } else { - i - }; - path.push(index as u32); - return Ok(Some((path, source_path))); + (0..self.count()).map(move |i| { + match *self { + FieldPlacement::Union(_) | + FieldPlacement::Array { .. } => i, + FieldPlacement::Arbitrary { .. } => { + if use_small { inverse_small[i] as usize } + else { inverse_big[i] as usize } + } } - } - Ok(None) - } - - pub fn over_align(&self) -> Option { - let align = self.align.abi(); - let primitive_align = self.primitive_align.abi(); - if align > primitive_align { - Some(align as u32) - } else { - None - } + }) } } -/// An untagged union. -#[derive(PartialEq, Eq, Hash, Debug)] -pub struct Union { - pub align: Align, - pub primitive_align: Align, - - pub min_size: Size, - - /// If true, no alignment padding is used. 
- pub packed: bool, -} - -impl<'a, 'tcx> Union { - fn new(dl: &TargetDataLayout, repr: &ReprOptions) -> Union { - if repr.packed() && repr.align > 0 { - bug!("Union cannot be packed and aligned"); - } - - let primitive_align = if repr.packed() { - dl.i8_align - } else { - dl.aggregate_align - }; - - let align = if repr.align > 0 { - let repr_align = repr.align as u64; - debug!("Union::new repr_align: {:?}", repr_align); - primitive_align.max(Align::from_bytes(repr_align, repr_align).unwrap()) - } else { - primitive_align - }; - - Union { - align, - primitive_align, - min_size: Size::from_bytes(0), - packed: repr.packed(), - } +/// Describes how values of the type are passed by target ABIs, +/// in terms of categories of C types there are ABI rules for. +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub enum Abi { + Uninhabited, + Scalar(Scalar), + ScalarPair(Scalar, Scalar), + Vector, + Aggregate { + /// If true, the size is exact, otherwise it's only a lower bound. + sized: bool, + packed: bool } +} - /// Extend the Struct with more fields. - fn extend(&mut self, dl: &TargetDataLayout, - fields: I, - scapegoat: Ty<'tcx>) - -> Result<(), LayoutError<'tcx>> - where I: Iterator>> { - for (index, field) in fields.enumerate() { - let field = field?; - if field.is_unsized() { - bug!("Union::extend: field #{} of `{}` is unsized", - index, scapegoat); - } - - debug!("Union::extend field: {:?} {:?}", field, field.size(dl)); - - if !self.packed { - self.align = self.align.max(field.align(dl)); - self.primitive_align = self.primitive_align.max(field.primitive_align(dl)); - } - self.min_size = cmp::max(self.min_size, field.size(dl)); +impl Abi { + /// Returns true if the layout corresponds to an unsized type. + pub fn is_unsized(&self) -> bool { + match *self { + Abi::Uninhabited | + Abi::Scalar(_) | + Abi::ScalarPair(..) | + Abi::Vector => false, + Abi::Aggregate { sized, .. } => !sized } - - debug!("Union::extend min-size: {:?}", self.min_size); - - Ok(()) } - /// Get the size with trailing alignment padding. - pub fn stride(&self) -> Size { - self.min_size.abi_align(self.align) - } - - pub fn over_align(&self) -> Option { - let align = self.align.abi(); - let primitive_align = self.primitive_align.abi(); - if align > primitive_align { - Some(align as u32) - } else { - None + /// Returns true if the fields of the layout are packed. + pub fn is_packed(&self) -> bool { + match *self { + Abi::Uninhabited | + Abi::Scalar(_) | + Abi::ScalarPair(..) | + Abi::Vector => false, + Abi::Aggregate { packed, .. } => packed } } } -/// The first half of a fat pointer. -/// - For a trait object, this is the address of the box. -/// - For a slice, this is the base address. -pub const FAT_PTR_ADDR: usize = 0; - -/// The second half of a fat pointer. -/// - For a trait object, this is the address of the vtable. -/// - For a slice, this is the length. -pub const FAT_PTR_EXTRA: usize = 1; - -/// Type layout, from which size and alignment can be cheaply computed. -/// For ADTs, it also includes field placement and enum optimizations. -/// NOTE: Because Layout is interned, redundant information should be -/// kept to a minimum, e.g. it includes no sub-component Ty or Layout. -#[derive(Debug, PartialEq, Eq, Hash)] -pub enum Layout { - /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr. - Scalar { - value: Primitive, - // If true, the value cannot represent a bit pattern of all zeroes. - non_zero: bool - }, - - /// SIMD vectors, from structs marked with #[repr(simd)]. 
- Vector { - element: Primitive, - count: u64 - }, - - /// TyArray, TySlice or TyStr. - Array { - /// If true, the size is exact, otherwise it's only a lower bound. - sized: bool, - align: Align, - primitive_align: Align, - element_size: Size, - count: u64 - }, - - /// TyRawPtr or TyRef with a !Sized pointee. - FatPointer { - metadata: Primitive, - /// If true, the pointer cannot be null. - non_zero: bool - }, - - // Remaining variants are all ADTs such as structs, enums or tuples. - - /// C-like enums; basically an integer. - CEnum { - discr: Integer, - signed: bool, - non_zero: bool, - /// Inclusive discriminant range. - /// If min > max, it represents min...u64::MAX followed by 0...max. - // FIXME(eddyb) always use the shortest range, e.g. by finding - // the largest space between two consecutive discriminants and - // taking everything else as the (shortest) discriminant range. - min: u64, - max: u64 - }, - - /// Single-case enums, and structs/tuples. - Univariant { - variant: Struct, - /// If true, the structure is NonZero. - // FIXME(eddyb) use a newtype Layout kind for this. - non_zero: bool - }, - - /// Untagged unions. - UntaggedUnion { - variants: Union, +#[derive(PartialEq, Eq, Hash, Debug)] +pub enum Variants { + /// Single enum variants, structs/tuples, unions, and all non-ADTs. + Single { + index: usize }, - /// General-case enums: for each case there is a struct, and they - /// all start with a field for the discriminant. - General { - discr: Integer, - variants: Vec, - size: Size, - align: Align, - primitive_align: Align, + /// General-case enums: for each case there is a struct, and they all have + /// all space reserved for the discriminant, and their first field starts + /// at a non-0 offset, after where the discriminant would go. + Tagged { + discr: Scalar, + variants: Vec, }, - /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` must have single field which is known to be nonnull due to its type. - /// The other case is known to be zero sized. Hence we represent the enum - /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant, - /// otherwise it indicates the other case. + /// Multiple cases distinguished by a niche (values invalid for a type): + /// the variant `dataful_variant` contains a niche at an arbitrary + /// offset (field 0 of the enum), which for a variant with discriminant + /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`. /// - /// For example, `std::option::Option` instantiated at a safe pointer type - /// is represented such that `None` is a null pointer and `Some` is the - /// identity function. - RawNullablePointer { - nndiscr: u64, - value: Primitive - }, - - /// Two cases distinguished by a nullable pointer: the case with discriminant - /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th - /// field is known to be nonnull due to its type; if that field is null, then - /// it represents the other case, which is known to be zero sized. - StructWrappedNullablePointer { - nndiscr: u64, - nonnull: Struct, - /// N.B. There is a 0 at the start, for LLVM GEP through a pointer. - discrfield: FieldPath, - /// Like discrfield, but in source order. For debuginfo. - discrfield_source: FieldPath + /// For example, `Option<(usize, &T)>` is represented such that + /// `None` has a null pointer for the second tuple field, and + /// `Some` is the identity function (with a non-null reference). 
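The effect described here is observable with plain `std` types; a quick check relying on the documented guarantee that `Option` of a reference or `NonZero*` integer needs no extra tag:

```rust
use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // `None` is stored in the reference's null niche, so no separate tag is needed.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());

    // Likewise for NonZero integers: 0 is the invalid value that encodes `None`.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());

    // With no invalid values to reuse, the discriminant needs its own storage.
    assert!(size_of::<Option<u32>>() > size_of::<u32>());
}
```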
+ NicheFilling { + dataful_variant: usize, + niche_variants: RangeInclusive, + niche: Scalar, + niche_start: u128, + variants: Vec, } } @@ -1125,72 +841,387 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { } } -impl<'a, 'tcx> Layout { - pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>) - -> Result<&'tcx Layout, LayoutError<'tcx>> { - let success = |layout| Ok(tcx.intern_layout(layout)); - let dl = &tcx.data_layout; - assert!(!ty.has_infer_types()); +#[derive(PartialEq, Eq, Hash, Debug)] +pub struct LayoutDetails { + pub variants: Variants, + pub fields: FieldPlacement, + pub abi: Abi, + pub align: Align, + pub size: Size +} - let ptr_layout = |pointee: Ty<'tcx>| { - let non_zero = !ty.is_unsafe_ptr(); - let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); - if pointee.is_sized(tcx, param_env, DUMMY_SP) { - Ok(Scalar { value: Pointer, non_zero: non_zero }) - } else { - let unsized_part = tcx.struct_tail(pointee); - match unsized_part.sty { - ty::TySlice(_) | ty::TyStr => Ok(FatPointer { - metadata: Int(dl.ptr_sized_integer()), - non_zero: non_zero - }), - ty::TyDynamic(..) => Ok(FatPointer { metadata: Pointer, non_zero: non_zero }), - ty::TyForeign(..) => Ok(Scalar { value: Pointer, non_zero: non_zero }), - _ => Err(LayoutError::Unknown(unsized_part)), - } +impl LayoutDetails { + fn scalar(cx: C, scalar: Scalar) -> Self { + let size = scalar.value.size(cx); + let align = scalar.value.align(cx); + LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Union(0), + abi: Abi::Scalar(scalar), + size, + align, + } + } + + fn uninhabited(field_count: usize) -> Self { + let align = Align::from_bytes(1, 1).unwrap(); + LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Union(field_count), + abi: Abi::Uninhabited, + align, + size: Size::from_bytes(0) + } + } +} + +fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) + -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> +{ + let (param_env, ty) = query.into_parts(); + + let rec_limit = tcx.sess.recursion_limit.get(); + let depth = tcx.layout_depth.get(); + if depth > rec_limit { + tcx.sess.fatal( + &format!("overflow representing the type `{}`", ty)); + } + + tcx.layout_depth.set(depth+1); + let layout = LayoutDetails::compute_uncached(tcx, param_env, ty); + tcx.layout_depth.set(depth); + + layout +} + +pub fn provide(providers: &mut ty::maps::Providers) { + *providers = ty::maps::Providers { + layout_raw, + ..*providers + }; +} + +impl<'a, 'tcx> LayoutDetails { + fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ty: Ty<'tcx>) + -> Result<&'tcx Self, LayoutError<'tcx>> { + let cx = (tcx, param_env); + let dl = cx.data_layout(); + let scalar_unit = |value: Primitive| { + let bits = value.size(dl).bits(); + assert!(bits <= 128); + Scalar { + value, + valid_range: 0..=(!0 >> (128 - bits)) } }; - - let layout = match ty.sty { - // Basic scalars. 
- ty::TyBool => Scalar { value: Int(I1), non_zero: false }, - ty::TyChar => Scalar { value: Int(I32), non_zero: false }, - ty::TyInt(ity) => { - Scalar { - value: Int(Integer::from_attr(dl, attr::SignedInt(ity))), - non_zero: false - } + let scalar = |value: Primitive| { + tcx.intern_layout(LayoutDetails::scalar(cx, scalar_unit(value))) + }; + let scalar_pair = |a: Scalar, b: Scalar| { + let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align); + let b_offset = a.value.size(dl).abi_align(b.value.align(dl)); + let size = (b_offset + b.value.size(dl)).abi_align(align); + LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Arbitrary { + offsets: vec![Size::from_bytes(0), b_offset], + memory_index: vec![0, 1] + }, + abi: Abi::ScalarPair(a, b), + align, + size } - ty::TyUint(ity) => { - Scalar { - value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))), - non_zero: false - } + }; + + #[derive(Copy, Clone, Debug)] + enum StructKind { + /// A tuple, closure, or univariant which cannot be coerced to unsized. + AlwaysSized, + /// A univariant, the last field of which may be coerced to unsized. + MaybeUnsized, + /// A univariant, but part of an enum. + EnumVariant(Integer), + } + let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| { + let packed = repr.packed(); + if packed && repr.align > 0 { + bug!("struct cannot be packed and aligned"); } - ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false }, - ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false }, - ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true }, - // The never type. - ty::TyNever => Univariant { - variant: Struct::new(dl, &vec![], &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?, - non_zero: false - }, + let mut align = if packed { + dl.i8_align + } else { + dl.aggregate_align + }; - // Potentially-fat pointers. - ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | - ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - ptr_layout(pointee)? - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_layout(ty.boxed_ty())? - } + let mut sized = true; + let mut offsets = vec![Size::from_bytes(0); fields.len()]; + let mut inverse_memory_index: Vec = (0..fields.len() as u32).collect(); - // Arrays and slices. - ty::TyArray(element, mut count) => { + // Anything with repr(C) or repr(packed) doesn't optimize. + let optimize = match kind { + StructKind::AlwaysSized | + StructKind::MaybeUnsized | + StructKind::EnumVariant(I8) => { + (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty() + } + StructKind::EnumVariant(_) => false + }; + if optimize { + let end = if let StructKind::MaybeUnsized = kind { + fields.len() - 1 + } else { + fields.len() + }; + let optimizing = &mut inverse_memory_index[..end]; + match kind { + StructKind::AlwaysSized | + StructKind::MaybeUnsized => { + optimizing.sort_by_key(|&x| { + // Place ZSTs first to avoid "interesting offsets", + // especially with only one or two non-ZST fields. + let f = &fields[x as usize]; + (!f.is_zst(), cmp::Reverse(f.align.abi())) + }) + } + StructKind::EnumVariant(_) => { + optimizing.sort_by_key(|&x| fields[x as usize].align.abi()); + } + } + } + + // inverse_memory_index holds field indices by increasing memory offset. + // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. + // We now write field offsets to the corresponding offset slot; + // field 5 with offset 0 puts 0 in offsets[5]. 
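The sort key used above for struct-like layouts (zero-sized fields first, then decreasing alignment) can be tried out on its own; a sketch where `(size, align)` tuples stand in for field layouts, with all names illustrative:

```rust
use std::cmp::Reverse;

// Field reordering heuristic for struct-like layouts: ZSTs first,
// then decreasing alignment, mirroring the sort key used above.
fn struct_field_order(fields: &[(u64, u64)]) -> Vec<usize> {
    let mut order: Vec<usize> = (0..fields.len()).collect();
    order.sort_by_key(|&i| {
        let (size, align) = fields[i];
        (size != 0, Reverse(align))
    });
    order
}

fn main() {
    // Source-order fields of a hypothetical struct: u8, (), u32, u16.
    let fields = [(1, 1), (0, 1), (4, 4), (2, 2)];
    // Memory order: the ZST first, then u32, u16, u8 by decreasing alignment.
    assert_eq!(struct_field_order(&fields), vec![1, 2, 3, 0]);
}
```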
+ // At the bottom of this function, we use inverse_memory_index to produce memory_index. + + let mut offset = Size::from_bytes(0); + + if let StructKind::EnumVariant(discr) = kind { + offset = discr.size(); + if !packed { + let discr_align = discr.align(dl); + align = align.max(discr_align); + } + } + + for &i in &inverse_memory_index { + let field = fields[i as usize]; + if !sized { + bug!("univariant: field #{} of `{}` comes after unsized field", + offsets.len(), ty); + } + + if field.abi == Abi::Uninhabited { + return Ok(LayoutDetails::uninhabited(fields.len())); + } + + if field.is_unsized() { + sized = false; + } + + // Invariant: offset < dl.obj_size_bound() <= 1<<61 + if !packed { + offset = offset.abi_align(field.align); + align = align.max(field.align); + } + + debug!("univariant offset: {:?} field: {:#?}", offset, field); + offsets[i as usize] = offset; + + offset = offset.checked_add(field.size, dl) + .ok_or(LayoutError::SizeOverflow(ty))?; + } + + if repr.align > 0 { + let repr_align = repr.align as u64; + align = align.max(Align::from_bytes(repr_align, repr_align).unwrap()); + debug!("univariant repr_align: {:?}", repr_align); + } + + debug!("univariant min_size: {:?}", offset); + let min_size = offset; + + // As stated above, inverse_memory_index holds field indices by increasing offset. + // This makes it an already-sorted view of the offsets vec. + // To invert it, consider: + // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. + // Field 5 would be the first element, so memory_index is i: + // Note: if we didn't optimize, it's already right. + + let mut memory_index; + if optimize { + memory_index = vec![0; inverse_memory_index.len()]; + + for i in 0..inverse_memory_index.len() { + memory_index[inverse_memory_index[i] as usize] = i as u32; + } + } else { + memory_index = inverse_memory_index; + } + + let size = min_size.abi_align(align); + let mut abi = Abi::Aggregate { + sized, + packed + }; + + // Unpack newtype ABIs and find scalar pairs. + if sized && size.bytes() > 0 { + // All other fields must be ZSTs, and we need them to all start at 0. + let mut zst_offsets = + offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst()); + if zst_offsets.all(|(_, o)| o.bytes() == 0) { + let mut non_zst_fields = + fields.iter().enumerate().filter(|&(_, f)| !f.is_zst()); + + match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { + // We have exactly one non-ZST field. + (Some((i, field)), None, None) => { + // Field fills the struct and it has a scalar or scalar pair ABI. + if offsets[i].bytes() == 0 && size == field.size { + match field.abi { + // For plain scalars we can't unpack newtypes + // for `#[repr(C)]`, as that affects C ABIs. + Abi::Scalar(_) if optimize => { + abi = field.abi.clone(); + } + // But scalar pairs are Rust-specific and get + // treated as aggregates by C ABIs anyway. + Abi::ScalarPair(..) => { + abi = field.abi.clone(); + } + _ => {} + } + } + } + + // Two non-ZST fields, and they're both scalars. + (Some((i, &TyLayout { + details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, .. + })), Some((j, &TyLayout { + details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, .. + })), None) => { + // Order by the memory placement, not source order. 
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] { + ((i, a), (j, b)) + } else { + ((j, b), (i, a)) + }; + let pair = scalar_pair(a.clone(), b.clone()); + let pair_offsets = match pair.fields { + FieldPlacement::Arbitrary { + ref offsets, + ref memory_index + } => { + assert_eq!(memory_index, &[0, 1]); + offsets + } + _ => bug!() + }; + if offsets[i] == pair_offsets[0] && + offsets[j] == pair_offsets[1] && + align == pair.align && + size == pair.size { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). + abi = pair.abi; + } + } + + _ => {} + } + } + } + + Ok(LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Arbitrary { + offsets, + memory_index + }, + abi, + align, + size + }) + }; + let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| { + Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?)) + }; + assert!(!ty.has_infer_types()); + + Ok(match ty.sty { + // Basic scalars. + ty::TyBool => { + tcx.intern_layout(LayoutDetails::scalar(cx, Scalar { + value: Int(I8, false), + valid_range: 0..=1 + })) + } + ty::TyChar => { + tcx.intern_layout(LayoutDetails::scalar(cx, Scalar { + value: Int(I32, false), + valid_range: 0..=0x10FFFF + })) + } + ty::TyInt(ity) => { + scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)) + } + ty::TyUint(ity) => { + scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)) + } + ty::TyFloat(FloatTy::F32) => scalar(F32), + ty::TyFloat(FloatTy::F64) => scalar(F64), + ty::TyFnPtr(_) => { + let mut ptr = scalar_unit(Pointer); + ptr.valid_range.start = 1; + tcx.intern_layout(LayoutDetails::scalar(cx, ptr)) + } + + // The never type. + ty::TyNever => { + tcx.intern_layout(LayoutDetails::uninhabited(0)) + } + + // Potentially-fat pointers. + ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { + let mut data_ptr = scalar_unit(Pointer); + if !ty.is_unsafe_ptr() { + data_ptr.valid_range.start = 1; + } + + let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); + if pointee.is_sized(tcx, param_env, DUMMY_SP) { + return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr))); + } + + let unsized_part = tcx.struct_tail(pointee); + let metadata = match unsized_part.sty { + ty::TyForeign(..) => { + return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr))); + } + ty::TySlice(_) | ty::TyStr => { + scalar_unit(Int(dl.ptr_sized_integer(), false)) + } + ty::TyDynamic(..) => { + let mut vtable = scalar_unit(Pointer); + vtable.valid_range.start = 1; + vtable + } + _ => return Err(LayoutError::Unknown(unsized_part)) + }; + + // Effectively a (ptr, meta) tuple. + tcx.intern_layout(scalar_pair(data_ptr, metadata)) + } + + // Arrays and slices. 
+ ty::TyArray(element, mut count) => { if count.has_projections() { count = tcx.normalize_associated_type_in_env(&count, param_env); if count.has_projections() { @@ -1198,284 +1229,350 @@ pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - let element = element.layout(tcx, param_env)?; - let element_size = element.size(dl); + let element = cx.layout_of(element)?; let count = count.val.to_const_int().unwrap().to_u64().unwrap(); - if element_size.checked_mul(count, dl).is_none() { - return Err(LayoutError::SizeOverflow(ty)); - } - Array { - sized: true, - align: element.align(dl), - primitive_align: element.primitive_align(dl), - element_size, - count, - } + let size = element.size.checked_mul(count, dl) + .ok_or(LayoutError::SizeOverflow(ty))?; + + tcx.intern_layout(LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Array { + stride: element.size, + count + }, + abi: Abi::Aggregate { + sized: true, + packed: false + }, + align: element.align, + size + }) } ty::TySlice(element) => { - let element = element.layout(tcx, param_env)?; - Array { - sized: false, - align: element.align(dl), - primitive_align: element.primitive_align(dl), - element_size: element.size(dl), - count: 0 - } + let element = cx.layout_of(element)?; + tcx.intern_layout(LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Array { + stride: element.size, + count: 0 + }, + abi: Abi::Aggregate { + sized: false, + packed: false + }, + align: element.align, + size: Size::from_bytes(0) + }) } ty::TyStr => { - Array { - sized: false, + tcx.intern_layout(LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Array { + stride: Size::from_bytes(1), + count: 0 + }, + abi: Abi::Aggregate { + sized: false, + packed: false + }, align: dl.i8_align, - primitive_align: dl.i8_align, - element_size: Size::from_bytes(1), - count: 0 - } + size: Size::from_bytes(0) + }) } // Odd unit types. ty::TyFnDef(..) => { - Univariant { - variant: Struct::new(dl, &vec![], - &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?, - non_zero: false - } + univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)? } ty::TyDynamic(..) | ty::TyForeign(..) => { - let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?; - unit.sized = false; - Univariant { variant: unit, non_zero: false } + let mut unit = univariant_uninterned(&[], &ReprOptions::default(), + StructKind::AlwaysSized)?; + match unit.abi { + Abi::Aggregate { ref mut sized, .. } => *sized = false, + _ => bug!() + } + tcx.intern_layout(unit) } // Tuples, generators and closures. ty::TyGenerator(def_id, ref substs, _) => { let tys = substs.field_tys(def_id, tcx); - let st = Struct::new(dl, - &tys.map(|ty| ty.layout(tcx, param_env)) - .collect::, _>>()?, + univariant(&tys.map(|ty| cx.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?; - Univariant { variant: st, non_zero: false } + StructKind::AlwaysSized)? } ty::TyClosure(def_id, ref substs) => { let tys = substs.upvar_tys(def_id, tcx); - let st = Struct::new(dl, - &tys.map(|ty| ty.layout(tcx, param_env)) - .collect::, _>>()?, + univariant(&tys.map(|ty| cx.layout_of(ty)).collect::, _>>()?, &ReprOptions::default(), - StructKind::AlwaysSizedUnivariant, ty)?; - Univariant { variant: st, non_zero: false } + StructKind::AlwaysSized)? 
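The array, slice and fat-pointer layouts built above are easy to observe directly; these equalities hold on any mainstream target because a fat pointer is exactly the `(data, metadata)` scalar pair:

```rust
use std::mem::size_of;

fn main() {
    // Thin pointer: just the data pointer.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());

    // Slice and str references carry a length as the second scalar.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());

    // Trait objects carry a vtable pointer as the second scalar.
    trait Marker {}
    assert_eq!(size_of::<&dyn Marker>(), 2 * size_of::<usize>());

    // Arrays are `stride * count`.
    assert_eq!(size_of::<[u16; 4]>(), 4 * size_of::<u16>());
}
```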
} ty::TyTuple(tys, _) => { let kind = if tys.len() == 0 { - StructKind::AlwaysSizedUnivariant + StructKind::AlwaysSized } else { - StructKind::MaybeUnsizedUnivariant + StructKind::MaybeUnsized }; - let st = Struct::new(dl, - &tys.iter().map(|ty| ty.layout(tcx, param_env)) - .collect::, _>>()?, - &ReprOptions::default(), kind, ty)?; - Univariant { variant: st, non_zero: false } + univariant(&tys.iter().map(|ty| cx.layout_of(ty)).collect::, _>>()?, + &ReprOptions::default(), kind)? } // SIMD vector types. ty::TyAdt(def, ..) if def.repr.simd() => { - let element = ty.simd_type(tcx); - match *element.layout(tcx, param_env)? { - Scalar { value, .. } => { - return success(Vector { - element: value, - count: ty.simd_size(tcx) as u64 - }); - } + let count = ty.simd_size(tcx) as u64; + let element = cx.layout_of(ty.simd_type(tcx))?; + match element.abi { + Abi::Scalar(_) => {} _ => { tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \ a non-machine element type `{}`", - ty, element)); + ty, element.ty)); } } + let size = element.size.checked_mul(count, dl) + .ok_or(LayoutError::SizeOverflow(ty))?; + let align = dl.vector_align(size); + let size = size.abi_align(align); + + tcx.intern_layout(LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Array { + stride: element.size, + count + }, + abi: Abi::Vector, + size, + align, + }) } // ADTs. ty::TyAdt(def, substs) => { - if def.variants.is_empty() { - // Uninhabitable; represent as unit - // (Typechecking will reject discriminant-sizing attrs.) + // Cache the field layouts. + let variants = def.variants.iter().map(|v| { + v.fields.iter().map(|field| { + cx.layout_of(field.ty(tcx, substs)) + }).collect::, _>>() + }).collect::, _>>()?; - return success(Univariant { - variant: Struct::new(dl, &vec![], - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?, - non_zero: false + let (inh_first, inh_second) = { + let mut inh_variants = (0..variants.len()).filter(|&v| { + variants[v].iter().all(|f| f.abi != Abi::Uninhabited) }); + (inh_variants.next(), inh_variants.next()) + }; + if inh_first.is_none() { + // Uninhabited because it has no variants, or only uninhabited ones. + return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0))); } - if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) { - // All bodies empty -> intlike - let (mut min, mut max, mut non_zero) = (i64::max_value(), - i64::min_value(), - true); - for discr in def.discriminants(tcx) { - let x = discr.to_u128_unchecked() as i64; - if x == 0 { non_zero = false; } - if x < min { min = x; } - if x > max { max = x; } + if def.is_union() { + let packed = def.repr.packed(); + if packed && def.repr.align > 0 { + bug!("Union cannot be packed and aligned"); } - // FIXME: should handle i128? signed-value based impl is weird and hard to - // grok. - let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); - return success(CEnum { - discr, - signed, - non_zero, - // FIXME: should be u128? 
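The union rule computed above, with all fields at offset 0 and the size equal to the largest field rounded up to the union's alignment, matches what an ordinary `#[repr(C)]` union exhibits (using `repr(C)` here so the layout is guaranteed):

```rust
use std::mem::{align_of, size_of};

// All fields start at offset 0; the union is as large as its biggest
// field, rounded up to the alignment of its most-aligned field.
#[repr(C)]
union Example {
    a: u8,
    b: u32,
    c: [u16; 3],
}

fn main() {
    assert_eq!(align_of::<Example>(), align_of::<u32>());
    // Largest field is 6 bytes ([u16; 3]), rounded up to the union's alignment.
    let align = align_of::<u32>();
    assert_eq!(size_of::<Example>(), (6 + align - 1) / align * align);
}
```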
- min: min as u64, - max: max as u64 - }); + let mut align = if def.repr.packed() { + dl.i8_align + } else { + dl.aggregate_align + }; + + if def.repr.align > 0 { + let repr_align = def.repr.align as u64; + align = align.max( + Align::from_bytes(repr_align, repr_align).unwrap()); + } + + let mut size = Size::from_bytes(0); + for field in &variants[0] { + assert!(!field.is_unsized()); + + if !packed { + align = align.max(field.align); + } + size = cmp::max(size, field.size); + } + + return Ok(tcx.intern_layout(LayoutDetails { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Union(variants[0].len()), + abi: Abi::Aggregate { + sized: true, + packed + }, + align, + size: size.abi_align(align) + })); } - if !def.is_enum() || (def.variants.len() == 1 && - !def.repr.inhibit_enum_layout_opt()) { - // Struct, or union, or univariant enum equivalent to a struct. + let is_struct = !def.is_enum() || + // Only one variant is inhabited. + (inh_second.is_none() && + // Representation optimizations are allowed. + !def.repr.inhibit_enum_layout_opt() && + // Inhabited variant either has data ... + (!variants[inh_first.unwrap()].is_empty() || + // ... or there other, uninhabited, variants. + variants.len() > 1)); + if is_struct { + // Struct, or univariant enum equivalent to a struct. // (Typechecking will reject discriminant-sizing attrs.) - let kind = if def.is_enum() || def.variants[0].fields.len() == 0{ - StructKind::AlwaysSizedUnivariant + let v = inh_first.unwrap(); + let kind = if def.is_enum() || variants[v].len() == 0 { + StructKind::AlwaysSized } else { let param_env = tcx.param_env(def.did); - let fields = &def.variants[0].fields; - let last_field = &fields[fields.len()-1]; + let last_field = def.variants[v].fields.last().unwrap(); let always_sized = tcx.type_of(last_field.did) .is_sized(tcx, param_env, DUMMY_SP); - if !always_sized { StructKind::MaybeUnsizedUnivariant } - else { StructKind::AlwaysSizedUnivariant } + if !always_sized { StructKind::MaybeUnsized } + else { StructKind::AlwaysSized } }; - let fields = def.variants[0].fields.iter().map(|field| { - field.ty(tcx, substs).layout(tcx, param_env) - }).collect::, _>>()?; - let layout = if def.is_union() { - let mut un = Union::new(dl, &def.repr); - un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?; - UntaggedUnion { variants: un } - } else { - let st = Struct::new(dl, &fields, &def.repr, - kind, ty)?; - let non_zero = Some(def.did) == tcx.lang_items().non_zero(); - Univariant { variant: st, non_zero: non_zero } - }; - return success(layout); - } - - // Since there's at least one - // non-empty body, explicit discriminants should have - // been rejected by a checker before this point. - for (i, v) in def.variants.iter().enumerate() { - if v.discr != ty::VariantDiscr::Relative(i) { - bug!("non-C-like enum {} with specified discriminants", - tcx.item_path_str(def.did)); + let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?; + st.variants = Variants::Single { index: v }; + // Exclude 0 from the range of a newtype ABI NonZero. + if Some(def.did) == cx.tcx().lang_items().non_zero() { + match st.abi { + Abi::Scalar(ref mut scalar) | + Abi::ScalarPair(ref mut scalar, _) => { + if scalar.valid_range.start == 0 { + scalar.valid_range.start = 1; + } + } + _ => {} + } } + return Ok(tcx.intern_layout(st)); } - // Cache the substituted and normalized variant field types. 
- let variants = def.variants.iter().map(|v| { - v.fields.iter().map(|field| field.ty(tcx, substs)).collect::>() - }).collect::>(); - - if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() { - // Nullable pointer optimization - for discr in 0..2 { - let other_fields = variants[1 - discr].iter().map(|ty| { - ty.layout(tcx, param_env) - }); - if !Struct::would_be_zero_sized(dl, other_fields)? { - continue; + let no_explicit_discriminants = def.variants.iter().enumerate() + .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i)); + + // Niche-filling enum optimization. + if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants { + let mut dataful_variant = None; + let mut niche_variants = usize::max_value()..=0; + + // Find one non-ZST variant. + 'variants: for (v, fields) in variants.iter().enumerate() { + for f in fields { + if f.abi == Abi::Uninhabited { + continue 'variants; + } + if !f.is_zst() { + if dataful_variant.is_none() { + dataful_variant = Some(v); + continue 'variants; + } else { + dataful_variant = None; + break 'variants; + } + } } - let paths = Struct::non_zero_field_paths(tcx, - param_env, - variants[discr].iter().cloned(), - None)?; - let (mut path, mut path_source) = if let Some(p) = paths { p } - else { continue }; - - // FIXME(eddyb) should take advantage of a newtype. - if path == &[0] && variants[discr].len() == 1 { - let value = match *variants[discr][0].layout(tcx, param_env)? { - Scalar { value, .. } => value, - CEnum { discr, .. } => Int(discr), - _ => bug!("Layout::compute: `{}`'s non-zero \ - `{}` field not scalar?!", - ty, variants[discr][0]) + if niche_variants.start > v { + niche_variants.start = v; + } + niche_variants.end = v; + } + + if niche_variants.start > niche_variants.end { + dataful_variant = None; + } + + if let Some(i) = dataful_variant { + let count = (niche_variants.end - niche_variants.start + 1) as u128; + for (field_index, field) in variants[i].iter().enumerate() { + let (offset, niche, niche_start) = + match field.find_niche(cx, count)? { + Some(niche) => niche, + None => continue + }; + let st = variants.iter().enumerate().map(|(j, v)| { + let mut st = univariant_uninterned(v, + &def.repr, StructKind::AlwaysSized)?; + st.variants = Variants::Single { index: j }; + Ok(st) + }).collect::, _>>()?; + + let offset = st[i].fields.offset(field_index) + offset; + let LayoutDetails { size, mut align, .. } = st[i]; + + let mut niche_align = niche.value.align(dl); + let abi = if offset.bytes() == 0 && niche.value.size(dl) == size { + Abi::Scalar(niche.clone()) + } else { + let mut packed = st[i].abi.is_packed(); + if offset.abi_align(niche_align) != offset { + packed = true; + niche_align = dl.i8_align; + } + Abi::Aggregate { + sized: true, + packed + } }; - return success(RawNullablePointer { - nndiscr: discr as u64, - value, - }); + align = align.max(niche_align); + + return Ok(tcx.intern_layout(LayoutDetails { + variants: Variants::NicheFilling { + dataful_variant: i, + niche_variants, + niche, + niche_start, + variants: st, + }, + fields: FieldPlacement::Arbitrary { + offsets: vec![offset], + memory_index: vec![0] + }, + abi, + size, + align, + })); } + } + } - let st = Struct::new(dl, - &variants[discr].iter().map(|ty| ty.layout(tcx, param_env)) - .collect::, _>>()?, - &def.repr, StructKind::AlwaysSizedUnivariant, ty)?; - - // We have to fix the last element of path here. - let mut i = *path.last().unwrap(); - i = st.memory_index[i as usize]; - *path.last_mut().unwrap() = i; - path.push(0); // For GEP through a pointer. 
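The niche-filling path above singles out one variant that carries data and encodes the remaining, dataless variants into spare ("niche") values of one of its fields, so no separate tag is needed. The effect is visible from safe code; with this optimization in place, these checks pass on a current compiler:

    use std::mem::size_of;

    fn main() {
        // A reference is non-null, so `None` fits in the all-zero niche:
        // `Option<&u8>` costs no more than `&u8` itself.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());

        // `bool` only uses values 0 and 1 of its byte, leaving 254 niches, so
        // `Option<bool>` (and even `Option<Option<bool>>`) still fits in one byte.
        assert_eq!(size_of::<Option<bool>>(), 1);
        assert_eq!(size_of::<Option<Option<bool>>>(), 1);

        // With no usable niche, a tag has to be added: `Option<u8>` needs 2 bytes.
        assert_eq!(size_of::<Option<u8>>(), 2);
    }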
- path.reverse(); - path_source.push(0); - path_source.reverse(); - - return success(StructWrappedNullablePointer { - nndiscr: discr as u64, - nonnull: st, - discrfield: path, - discrfield_source: path_source - }); + let (mut min, mut max) = (i128::max_value(), i128::min_value()); + for (i, discr) in def.discriminants(tcx).enumerate() { + if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) { + continue; } + let x = discr.to_u128_unchecked() as i128; + if x < min { min = x; } + if x > max { max = x; } } + assert!(min <= max, "discriminant range is {}...{}", min, max); + let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max); - // The general case. - let discr_max = (variants.len() - 1) as i64; - assert!(discr_max >= 0); - let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max); let mut align = dl.aggregate_align; - let mut primitive_align = dl.aggregate_align; let mut size = Size::from_bytes(0); // We're interested in the smallest alignment, so start large. let mut start_align = Align::from_bytes(256, 256).unwrap(); + assert_eq!(Integer::for_abi_align(dl, start_align), None); - // Create the set of structs that represent each variant - // Use the minimum integer type we figured out above - let discr = Scalar { value: Int(min_ity), non_zero: false }; - let mut variants = variants.into_iter().map(|fields| { - let mut fields = fields.into_iter().map(|field| { - field.layout(tcx, param_env) - }).collect::, _>>()?; - fields.insert(0, &discr); - let st = Struct::new(dl, - &fields, - &def.repr, StructKind::EnumVariant, ty)?; + // Create the set of structs that represent each variant. + let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| { + let mut st = univariant_uninterned(&field_layouts, + &def.repr, StructKind::EnumVariant(min_ity))?; + st.variants = Variants::Single { index: i }; // Find the first field we can't move later // to make room for a larger discriminant. - // It is important to skip the first field. - for i in st.field_index_by_increasing_offset().skip(1) { - let field = fields[i]; - let field_align = field.align(dl); - if field.size(dl).bytes() != 0 || field_align.abi() != 1 { - start_align = start_align.min(field_align); + for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { + if !field.is_zst() || field.align.abi() != 1 { + start_align = start_align.min(field.align); break; } } - size = cmp::max(size, st.min_size); + size = cmp::max(size, st.size); align = align.max(st.align); - primitive_align = primitive_align.max(st.primitive_align); Ok(st) }).collect::, _>>()?; @@ -1521,30 +1618,55 @@ pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, ity = min_ity; } else { // Patch up the variants' first few fields. - let old_ity_size = Int(min_ity).size(dl); - let new_ity_size = Int(ity).size(dl); + let old_ity_size = min_ity.size(); + let new_ity_size = ity.size(); for variant in &mut variants { - for i in variant.offsets.iter_mut() { - // The first field is the discrimminant, at offset 0. - // These aren't in order, and we need to skip it. - if *i <= old_ity_size && *i > Size::from_bytes(0) { - *i = new_ity_size; - } + if variant.abi == Abi::Uninhabited { + continue; } - // We might be making the struct larger. - if variant.min_size <= old_ity_size { - variant.min_size = new_ity_size; + match variant.fields { + FieldPlacement::Arbitrary { ref mut offsets, .. 
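The tagged path above scans the inhabited variants' discriminants for their minimum and maximum and lets `Integer::repr_discr` pick the narrowest tag that fits. A self-contained model of that width selection (it ignores `#[repr]` hints, which the real function also honors):

    // Sketch: smallest integer width (in bits) able to represent every
    // discriminant in min..=max, preferring an unsigned interpretation.
    fn discr_int(min: i128, max: i128) -> (u32, bool) {
        assert!(min <= max);
        let signed = min < 0;
        let fits = |bits: u32| {
            if signed {
                min >= i128::MIN >> (128 - bits) && max <= i128::MAX >> (128 - bits)
            } else {
                max as u128 <= u128::MAX >> (128 - bits)
            }
        };
        for &bits in &[8u32, 16, 32, 64, 128] {
            if fits(bits) {
                return (bits, signed);
            }
        }
        unreachable!()
    }

    fn main() {
        assert_eq!(discr_int(0, 1), (8, false));
        assert_eq!(discr_int(0, 1000), (16, false));
        assert_eq!(discr_int(-1, 1), (8, true));

        // The compiler applies the same rule: this fieldless enum needs a 16-bit tag.
        #[allow(dead_code)]
        enum Wide { A = 0, B = 1000 }
        assert_eq!(std::mem::size_of::<Wide>(), 2);
    }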
} => { + for i in offsets { + if *i <= old_ity_size { + assert_eq!(*i, old_ity_size); + *i = new_ity_size; + } + } + // We might be making the struct larger. + if variant.size <= old_ity_size { + variant.size = new_ity_size; + } + } + _ => bug!() } } } - General { - discr: ity, - variants, - size, + let discr = Scalar { + value: Int(ity, signed), + valid_range: (min as u128)..=(max as u128) + }; + let abi = if discr.value.size(dl) == size { + Abi::Scalar(discr.clone()) + } else { + Abi::Aggregate { + sized: true, + packed: false + } + }; + tcx.intern_layout(LayoutDetails { + variants: Variants::Tagged { + discr, + variants + }, + // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results + // in lost optimizations, specifically around allocations, see + // `test/codegen/{alloc-optimisation,vec-optimizes-away}.rs`. + fields: FieldPlacement::Union(1), + abi, align, - primitive_align, - } + size + }) } // Types with no meaningful known layout. @@ -1553,204 +1675,24 @@ pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>, if ty == normalized { return Err(LayoutError::Unknown(ty)); } - return normalized.layout(tcx, param_env); + tcx.layout_raw(param_env.and(normalized))? } ty::TyParam(_) => { return Err(LayoutError::Unknown(ty)); } ty::TyInfer(_) | ty::TyError => { - bug!("Layout::compute: unexpected type `{}`", ty) + bug!("LayoutDetails::compute: unexpected type `{}`", ty) } - }; - - success(layout) - } - - /// Returns true if the layout corresponds to an unsized type. - pub fn is_unsized(&self) -> bool { - match *self { - Scalar {..} | Vector {..} | FatPointer {..} | - CEnum {..} | UntaggedUnion {..} | General {..} | - RawNullablePointer {..} | - StructWrappedNullablePointer {..} => false, - - Array { sized, .. } | - Univariant { variant: Struct { sized, .. }, .. } => !sized - } - } - - pub fn size(&self, cx: C) -> Size { - let dl = cx.data_layout(); - - match *self { - Scalar { value, .. } | RawNullablePointer { value, .. } => { - value.size(dl) - } - - Vector { element, count } => { - let element_size = element.size(dl); - let vec_size = match element_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::size({:?}): {} * {} overflowed", - self, element_size.bytes(), count) - }; - vec_size.abi_align(self.align(dl)) - } - - Array { element_size, count, .. } => { - match element_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::size({:?}): {} * {} overflowed", - self, element_size.bytes(), count) - } - } - - FatPointer { metadata, .. } => { - // Effectively a (ptr, meta) tuple. - Pointer.size(dl).abi_align(metadata.align(dl)) - .checked_add(metadata.size(dl), dl).unwrap() - .abi_align(self.align(dl)) - } - - CEnum { discr, .. } => Int(discr).size(dl), - General { size, .. } => size, - UntaggedUnion { ref variants } => variants.stride(), - - Univariant { ref variant, .. } | - StructWrappedNullablePointer { nonnull: ref variant, .. } => { - variant.stride() - } - } - } - - pub fn align(&self, cx: C) -> Align { - let dl = cx.data_layout(); - - match *self { - Scalar { value, .. } | RawNullablePointer { value, .. } => { - value.align(dl) - } - - Vector { element, count } => { - let elem_size = element.size(dl); - let vec_size = match elem_size.checked_mul(count, dl) { - Some(size) => size, - None => bug!("Layout::align({:?}): {} * {} overflowed", - self, elem_size.bytes(), count) - }; - for &(size, align) in &dl.vector_align { - if size == vec_size { - return align; - } - } - // Default to natural alignment, which is what LLVM does. 
- // That is, use the size, rounded up to a power of 2. - let align = vec_size.bytes().next_power_of_two(); - Align::from_bytes(align, align).unwrap() - } - - FatPointer { metadata, .. } => { - // Effectively a (ptr, meta) tuple. - Pointer.align(dl).max(metadata.align(dl)) - } - - CEnum { discr, .. } => Int(discr).align(dl), - Array { align, .. } | General { align, .. } => align, - UntaggedUnion { ref variants } => variants.align, - - Univariant { ref variant, .. } | - StructWrappedNullablePointer { nonnull: ref variant, .. } => { - variant.align - } - } - } - - /// Returns alignment before repr alignment is applied - pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align { - match *self { - Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align, - Univariant { ref variant, .. } | - StructWrappedNullablePointer { nonnull: ref variant, .. } => { - variant.primitive_align - }, - - _ => self.align(dl) - } - } - - /// Returns repr alignment if it is greater than the primitive alignment. - pub fn over_align(&self, dl: &TargetDataLayout) -> Option { - let align = self.align(dl); - let primitive_align = self.primitive_align(dl); - if align.abi() > primitive_align.abi() { - Some(align.abi() as u32) - } else { - None - } - } - - pub fn field_offset(&self, - cx: C, - i: usize, - variant_index: Option) - -> Size { - let dl = cx.data_layout(); - - match *self { - Scalar { .. } | - CEnum { .. } | - UntaggedUnion { .. } | - RawNullablePointer { .. } => { - Size::from_bytes(0) - } - - Vector { element, count } => { - let element_size = element.size(dl); - let i = i as u64; - assert!(i < count); - Size::from_bytes(element_size.bytes() * count) - } - - Array { element_size, count, .. } => { - let i = i as u64; - assert!(i < count); - Size::from_bytes(element_size.bytes() * count) - } - - FatPointer { metadata, .. } => { - // Effectively a (ptr, meta) tuple. - assert!(i < 2); - if i == 0 { - Size::from_bytes(0) - } else { - Pointer.size(dl).abi_align(metadata.align(dl)) - } - } - - Univariant { ref variant, .. } => variant.offsets[i], - - General { ref variants, .. } => { - let v = variant_index.expect("variant index required"); - variants[v].offsets[i + 1] - } - - StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { - if Some(nndiscr as usize) == variant_index { - nonnull.offsets[i] - } else { - Size::from_bytes(0) - } - } - } + }) } /// This is invoked by the `layout_raw` query to record the final /// layout of each type. #[inline] - pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - param_env: ty::ParamEnv<'tcx>, - layout: &Layout) { + fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + param_env: ty::ParamEnv<'tcx>, + layout: TyLayout<'tcx>) { // If we are running with `-Zprint-type-sizes`, record layouts for // dumping later. 
Ignore layouts that are done with non-empty // environments or non-monomorphic layouts, as the user only wants @@ -1770,24 +1712,23 @@ pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, - layout: &Layout) { + layout: TyLayout<'tcx>) { + let cx = (tcx, param_env); // (delay format until we actually need it) let record = |kind, opt_discr_size, variants| { let type_desc = format!("{:?}", ty); - let overall_size = layout.size(tcx); - let align = layout.align(tcx); tcx.sess.code_stats.borrow_mut().record_type_size(kind, type_desc, - align, - overall_size, + layout.align, + layout.size, opt_discr_size, variants); }; - let (adt_def, substs) = match ty.sty { - ty::TyAdt(ref adt_def, substs) => { + let adt_def = match ty.sty { + ty::TyAdt(ref adt_def, _) => { debug!("print-type-size t: `{:?}` process adt", ty); - (adt_def, substs) + adt_def } ty::TyClosure(..) => { @@ -1804,106 +1745,61 @@ fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>, let adt_kind = adt_def.adt_kind(); - let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| { - let layout = field_ty.layout(tcx, param_env); - match layout { - Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty), - Ok(field_layout) => { - session::FieldInfo { - name: field_name.to_string(), - offset: offset.bytes(), - size: field_layout.size(tcx).bytes(), - align: field_layout.align(tcx).abi(), + let build_variant_info = |n: Option, + flds: &[ast::Name], + layout: TyLayout<'tcx>| { + let mut min_size = Size::from_bytes(0); + let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| { + match layout.field(cx, i) { + Err(err) => { + bug!("no layout found for field {}: `{:?}`", name, err); + } + Ok(field_layout) => { + let offset = layout.fields.offset(i); + let field_end = offset + field_layout.size; + if min_size < field_end { + min_size = field_end; + } + session::FieldInfo { + name: name.to_string(), + offset: offset.bytes(), + size: field_layout.size.bytes(), + align: field_layout.align.abi(), + } } } - } - }; - - let build_primitive_info = |name: ast::Name, value: &Primitive| { - session::VariantInfo { - name: Some(name.to_string()), - kind: session::SizeKind::Exact, - align: value.align(tcx).abi(), - size: value.size(tcx).bytes(), - fields: vec![], - } - }; - - enum Fields<'a> { - WithDiscrim(&'a Struct), - NoDiscrim(&'a Struct), - } - - let build_variant_info = |n: Option, - flds: &[(ast::Name, Ty<'tcx>)], - layout: Fields| { - let (s, field_offsets) = match layout { - Fields::WithDiscrim(s) => (s, &s.offsets[1..]), - Fields::NoDiscrim(s) => (s, &s.offsets[0..]), - }; - let field_info: Vec<_> = - flds.iter() - .zip(field_offsets.iter()) - .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset)) - .collect(); + }).collect(); session::VariantInfo { name: n.map(|n|n.to_string()), - kind: if s.sized { + kind: if layout.is_unsized() { + session::SizeKind::Min + } else { session::SizeKind::Exact + }, + align: layout.align.abi(), + size: if min_size.bytes() == 0 { + layout.size.bytes() } else { - session::SizeKind::Min + min_size.bytes() }, - align: s.align.abi(), - size: s.min_size.bytes(), fields: field_info, } }; - match *layout { - Layout::StructWrappedNullablePointer { nonnull: ref variant_layout, - nndiscr, - discrfield: _, - discrfield_source: _ } => { - debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is 
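`build_variant_info` above derives a variant's occupied size from its field offsets and sizes, reporting it only as a minimum when the layout is unsized. The fold reduces to taking the furthest field end; a tiny standalone version (fields given as invented (offset, size) pairs):

    // Sketch of the minimum-size computation: the occupied prefix of a
    // (possibly unsized) variant is the furthest field end.
    fn min_size(fields: &[(u64, u64)]) -> u64 {
        fields.iter().map(|&(offset, size)| offset + size).max().unwrap_or(0)
    }

    fn main() {
        // Fields at offsets 0 (4 bytes) and 4 (1 byte): 5 bytes are in use,
        // even if the overall type is padded to 8.
        assert_eq!(min_size(&[(0, 4), (4, 1)]), 5);
    }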
{:?}", - ty, nndiscr, variant_layout); - let variant_def = &adt_def.variants[nndiscr as usize]; - let fields: Vec<_> = - variant_def.fields.iter() - .map(|field_def| (field_def.name, field_def.ty(tcx, substs))) - .collect(); - record(adt_kind.into(), - None, - vec![build_variant_info(Some(variant_def.name), - &fields, - Fields::NoDiscrim(variant_layout))]); - } - Layout::RawNullablePointer { nndiscr, value } => { - debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}", - ty, nndiscr, value); - let variant_def = &adt_def.variants[nndiscr as usize]; - record(adt_kind.into(), None, - vec![build_primitive_info(variant_def.name, &value)]); - } - Layout::Univariant { variant: ref variant_layout, non_zero: _ } => { - let variant_names = || { - adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::>() - }; - debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}", - ty, variant_layout, variant_names()); - assert!(adt_def.variants.len() <= 1, - "univariant with variants {:?}", variant_names()); - if adt_def.variants.len() == 1 { - let variant_def = &adt_def.variants[0]; + match layout.variants { + Variants::Single { index } => { + debug!("print-type-size `{:#?}` variant {}", + layout, adt_def.variants[index].name); + if !adt_def.variants.is_empty() { + let variant_def = &adt_def.variants[index]; let fields: Vec<_> = - variant_def.fields.iter() - .map(|f| (f.name, f.ty(tcx, substs))) - .collect(); + variant_def.fields.iter().map(|f| f.name).collect(); record(adt_kind.into(), None, vec![build_variant_info(Some(variant_def.name), &fields, - Fields::NoDiscrim(variant_layout))]); + layout)]); } else { // (This case arises for *empty* enums; so give it // zero variants.) @@ -1911,54 +1807,23 @@ enum Fields<'a> { } } - Layout::General { ref variants, discr, .. } => { - debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}", - ty, adt_def.variants.len(), variants.len(), variants); - let variant_infos: Vec<_> = - adt_def.variants.iter() - .zip(variants.iter()) - .map(|(variant_def, variant_layout)| { - let fields: Vec<_> = - variant_def.fields - .iter() - .map(|f| (f.name, f.ty(tcx, substs))) - .collect(); - build_variant_info(Some(variant_def.name), - &fields, - Fields::WithDiscrim(variant_layout)) - }) - .collect(); - record(adt_kind.into(), Some(discr.size()), variant_infos); - } - - Layout::UntaggedUnion { ref variants } => { - debug!("print-type-size t: `{:?}` adt union variants {:?}", - ty, variants); - // layout does not currently store info about each - // variant... - record(adt_kind.into(), None, Vec::new()); - } - - Layout::CEnum { discr, .. } => { - debug!("print-type-size t: `{:?}` adt c-like enum", ty); + Variants::NicheFilling { .. } | + Variants::Tagged { .. } => { + debug!("print-type-size `{:#?}` adt general variants def {}", + ty, adt_def.variants.len()); let variant_infos: Vec<_> = - adt_def.variants.iter() - .map(|variant_def| { - build_primitive_info(variant_def.name, - &Primitive::Int(discr)) - }) - .collect(); - record(adt_kind.into(), Some(discr.size()), variant_infos); - } - - // other cases provide little interesting (i.e. adjustable - // via representation tweaks) size info beyond total size. - Layout::Scalar { .. } | - Layout::Vector { .. } | - Layout::Array { .. } | - Layout::FatPointer { .. 
} => { - debug!("print-type-size t: `{:?}` adt other", ty); - record(adt_kind.into(), None, Vec::new()) + adt_def.variants.iter().enumerate().map(|(i, variant_def)| { + let fields: Vec<_> = + variant_def.fields.iter().map(|f| f.name).collect(); + build_variant_info(Some(variant_def.name), + &fields, + layout.for_variant(cx, i)) + }) + .collect(); + record(adt_kind.into(), match layout.variants { + Variants::Tagged { ref discr, .. } => Some(discr.value.size(tcx)), + _ => None + }, variant_infos); } } } @@ -1992,39 +1857,32 @@ pub fn compute(ty: Ty<'tcx>, assert!(!ty.has_infer_types()); // First try computing a static layout. - let err = match ty.layout(tcx, param_env) { + let err = match (tcx, param_env).layout_of(ty) { Ok(layout) => { - return Ok(SizeSkeleton::Known(layout.size(tcx))); + return Ok(SizeSkeleton::Known(layout.size)); } Err(err) => err }; - let ptr_skeleton = |pointee: Ty<'tcx>| { - let non_zero = !ty.is_unsafe_ptr(); - let tail = tcx.struct_tail(pointee); - match tail.sty { - ty::TyParam(_) | ty::TyProjection(_) => { - assert!(tail.has_param_types() || tail.has_self_ty()); - Ok(SizeSkeleton::Pointer { - non_zero, - tail: tcx.erase_regions(&tail) - }) - } - _ => { - bug!("SizeSkeleton::compute({}): layout errored ({}), yet \ - tail `{}` is not a type parameter or a projection", - ty, err, tail) - } - } - }; - match ty.sty { ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - ptr_skeleton(pointee) - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_skeleton(ty.boxed_ty()) + let non_zero = !ty.is_unsafe_ptr(); + let tail = tcx.struct_tail(pointee); + match tail.sty { + ty::TyParam(_) | ty::TyProjection(_) => { + assert!(tail.has_param_types() || tail.has_self_ty()); + Ok(SizeSkeleton::Pointer { + non_zero, + tail: tcx.erase_regions(&tail) + }) + } + _ => { + bug!("SizeSkeleton::compute({}): layout errored ({}), yet \ + tail `{}` is not a type parameter or a projection", + ty, err, tail) + } + } } ty::TyAdt(def, substs) => { @@ -2109,142 +1967,184 @@ pub fn same_size(self, other: SizeSkeleton) -> bool { } } -/// A pair of a type and its layout. Implements various -/// type traversal APIs (e.g. recursing into fields). +/// The details of the layout of a type, alongside the type itself. +/// Provides various type traversal APIs (e.g. recursing into fields). +/// +/// Note that the details are NOT guaranteed to always be identical +/// to those obtained from `layout_of(ty)`, as we need to produce +/// layouts for which Rust types do not exist, such as enum variants +/// or synthetic fields of enums (i.e. discriminants) and fat pointers. #[derive(Copy, Clone, Debug)] pub struct TyLayout<'tcx> { pub ty: Ty<'tcx>, - pub layout: &'tcx Layout, - pub variant_index: Option, + details: &'tcx LayoutDetails } impl<'tcx> Deref for TyLayout<'tcx> { - type Target = Layout; - fn deref(&self) -> &Layout { - self.layout + type Target = &'tcx LayoutDetails; + fn deref(&self) -> &&'tcx LayoutDetails { + &self.details } } -pub trait LayoutTyper<'tcx>: HasDataLayout { - type TyLayout; - +pub trait HasTyCtxt<'tcx>: HasDataLayout { fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>; - fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout; - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>; } -/// Combines a tcx with the parameter environment so that you can -/// compute layout operations. 
-#[derive(Copy, Clone)] -pub struct LayoutCx<'a, 'tcx: 'a> { - tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, +impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> { + fn data_layout(&self) -> &TargetDataLayout { + &self.data_layout + } } -impl<'a, 'tcx> LayoutCx<'a, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self { - LayoutCx { tcx, param_env } +impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> { + self.global_tcx() } } -impl<'a, 'tcx> HasDataLayout for LayoutCx<'a, 'tcx> { +impl<'a, 'gcx, 'tcx, T: Copy> HasDataLayout for (TyCtxt<'a, 'gcx, 'tcx>, T) { fn data_layout(&self) -> &TargetDataLayout { - &self.tcx.data_layout + self.0.data_layout() } } -impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> { - type TyLayout = Result, LayoutError<'tcx>>; +impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> { + self.0.tcx() + } +} - fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { - self.tcx +pub trait MaybeResult { + fn from_ok(x: T) -> Self; + fn map_same T>(self, f: F) -> Self; +} + +impl MaybeResult for T { + fn from_ok(x: T) -> Self { + x + } + fn map_same T>(self, f: F) -> Self { + f(self) } +} +impl MaybeResult for Result { + fn from_ok(x: T) -> Self { + Ok(x) + } + fn map_same T>(self, f: F) -> Self { + self.map(f) + } +} + +pub trait LayoutOf { + type TyLayout; + + fn layout_of(self, ty: T) -> Self::TyLayout; +} + +impl<'a, 'tcx> LayoutOf> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) { + type TyLayout = Result, LayoutError<'tcx>>; + + /// Computes the layout of a type. Note that this implicitly + /// executes in "reveal all" mode. + #[inline] fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - let ty = self.normalize_projections(ty); + let (tcx, param_env) = self; - Ok(TyLayout { + let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); + let details = tcx.layout_raw(param_env.reveal_all().and(ty))?; + let layout = TyLayout { ty, - layout: ty.layout(self.tcx, self.param_env)?, - variant_index: None - }) - } + details + }; + + // NB: This recording is normally disabled; when enabled, it + // can however trigger recursive invocations of `layout_of`. + // Therefore, we execute it *after* the main query has + // completed, to avoid problems around recursive structures + // and the like. (Admitedly, I wasn't able to reproduce a problem + // here, but it seems like the right thing to do. -nmatsakis) + LayoutDetails::record_layout_for_printing(tcx, ty, param_env, layout); - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.tcx.normalize_associated_type_in_env(&ty, self.param_env) + Ok(layout) } } -impl<'a, 'tcx> TyLayout<'tcx> { - pub fn for_variant(&self, variant_index: usize) -> Self { - TyLayout { - variant_index: Some(variant_index), - ..*self - } - } +impl<'a, 'tcx> LayoutOf> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>, + ty::ParamEnv<'tcx>) { + type TyLayout = Result, LayoutError<'tcx>>; - pub fn field_offset(&self, cx: C, i: usize) -> Size { - self.layout.field_offset(cx, i, self.variant_index) - } + /// Computes the layout of a type. Note that this implicitly + /// executes in "reveal all" mode. + #[inline] + fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { + let (tcx_at, param_env) = self; - pub fn field_count(&self) -> usize { - // Handle enum/union through the type rather than Layout. 
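`MaybeResult` above lets one piece of generic code serve both infallible contexts, whose `layout_of` returns a `TyLayout` directly, and fallible ones that return `Result<TyLayout, LayoutError>`. A standalone copy of the pattern with a toy `u64` payload standing in for the layout:

    trait MaybeResult<T> {
        fn from_ok(x: T) -> Self;
        fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
    }

    impl<T> MaybeResult<T> for T {
        fn from_ok(x: T) -> Self { x }
        fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self { f(self) }
    }

    impl<T, E> MaybeResult<T> for Result<T, E> {
        fn from_ok(x: T) -> Self { Ok(x) }
        fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self { self.map(f) }
    }

    // Works whether the caller's "layout_of" is infallible (u64) or fallible
    // (Result<u64, E>); an error, if any, is simply passed through.
    fn double_size<R: MaybeResult<u64>>(size: R) -> R {
        size.map_same(|s| s * 2)
    }

    fn main() {
        assert_eq!(double_size(4u64), 8);
        assert_eq!(double_size(Ok::<u64, ()>(4)), Ok(8));
        assert_eq!(double_size(Err::<u64, &str>("no layout")), Err("no layout"));
    }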
- if let ty::TyAdt(def, _) = self.ty.sty { - let v = self.variant_index.unwrap_or(0); - if def.variants.is_empty() { - assert_eq!(v, 0); - return 0; - } else { - return def.variants[v].fields.len(); - } - } + let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all()); + let details = tcx_at.layout_raw(param_env.reveal_all().and(ty))?; + let layout = TyLayout { + ty, + details + }; - match *self.layout { - Scalar { .. } => { - bug!("TyLayout::field_count({:?}): not applicable", self) - } + // NB: This recording is normally disabled; when enabled, it + // can however trigger recursive invocations of `layout_of`. + // Therefore, we execute it *after* the main query has + // completed, to avoid problems around recursive structures + // and the like. (Admitedly, I wasn't able to reproduce a problem + // here, but it seems like the right thing to do. -nmatsakis) + LayoutDetails::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout); - // Handled above (the TyAdt case). - CEnum { .. } | - General { .. } | - UntaggedUnion { .. } | - RawNullablePointer { .. } | - StructWrappedNullablePointer { .. } => bug!(), + Ok(layout) + } +} - FatPointer { .. } => 2, +impl<'a, 'tcx> TyLayout<'tcx> { + pub fn for_variant(&self, cx: C, variant_index: usize) -> Self + where C: LayoutOf> + HasTyCtxt<'tcx>, + C::TyLayout: MaybeResult> + { + let details = match self.variants { + Variants::Single { index } if index == variant_index => self.details, + + Variants::Single { index } => { + // Deny calling for_variant more than once for non-Single enums. + cx.layout_of(self.ty).map_same(|layout| { + assert_eq!(layout.variants, Variants::Single { index }); + layout + }); + + let fields = match self.ty.sty { + ty::TyAdt(def, _) => def.variants[variant_index].fields.len(), + _ => bug!() + }; + let mut details = LayoutDetails::uninhabited(fields); + details.variants = Variants::Single { index: variant_index }; + cx.tcx().intern_layout(details) + } - Vector { count, .. } | - Array { count, .. } => { - let usize_count = count as usize; - assert_eq!(usize_count as u64, count); - usize_count + Variants::NicheFilling { ref variants, .. } | + Variants::Tagged { ref variants, .. } => { + &variants[variant_index] } + }; + + assert_eq!(details.variants, Variants::Single { index: variant_index }); - Univariant { ref variant, .. } => variant.offsets.len(), + TyLayout { + ty: self.ty, + details } } - pub fn field_type>(&self, cx: C, i: usize) -> Ty<'tcx> { + pub fn field(&self, cx: C, i: usize) -> C::TyLayout + where C: LayoutOf> + HasTyCtxt<'tcx>, + C::TyLayout: MaybeResult> + { let tcx = cx.tcx(); - - let ptr_field_type = |pointee: Ty<'tcx>| { - assert!(i < 2); - let slice = |element: Ty<'tcx>| { - if i == 0 { - tcx.mk_mut_ptr(element) - } else { - tcx.types.usize - } - }; - match tcx.struct_tail(pointee).sty { - ty::TySlice(element) => slice(element), - ty::TyStr => slice(tcx.types.u8), - ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()), - _ => bug!("TyLayout::field_type({:?}): not applicable", self) - } - }; - - match self.ty.sty { + cx.layout_of(match self.ty.sty { ty::TyBool | ty::TyChar | ty::TyInt(_) | @@ -2261,10 +2161,35 @@ pub fn field_type>(&self, cx: C, i: usize) -> Ty<'tcx> { // Potentially-fat pointers. ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) | ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. 
}) => { - ptr_field_type(pointee) - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_field_type(self.ty.boxed_ty()) + assert!(i < 2); + + // Reuse the fat *T type as its own thin pointer data field. + // This provides information about e.g. DST struct pointees + // (which may have no non-DST form), and will work as long + // as the `Abi` or `FieldPlacement` is checked by users. + if i == 0 { + let nil = tcx.mk_nil(); + let ptr_ty = if self.ty.is_unsafe_ptr() { + tcx.mk_mut_ptr(nil) + } else { + tcx.mk_mut_ref(tcx.types.re_static, nil) + }; + return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| { + ptr_layout.ty = self.ty; + ptr_layout + }); + } + + match tcx.struct_tail(pointee).sty { + ty::TySlice(_) | + ty::TyStr => tcx.types.usize, + ty::TyDynamic(..) => { + // FIXME(eddyb) use an usize/fn() array with + // the correct number of vtables slots. + tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil()) + } + _ => bug!("TyLayout::field_type({:?}): not applicable", self) + } } // Arrays and slices. @@ -2290,94 +2215,232 @@ pub fn field_type>(&self, cx: C, i: usize) -> Ty<'tcx> { // ADTs. ty::TyAdt(def, substs) => { - def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs) + match self.variants { + Variants::Single { index } => { + def.variants[index].fields[i].ty(tcx, substs) + } + + // Discriminant field for enums (where applicable). + Variants::Tagged { ref discr, .. } | + Variants::NicheFilling { niche: ref discr, .. } => { + assert_eq!(i, 0); + let layout = LayoutDetails::scalar(tcx, discr.clone()); + return MaybeResult::from_ok(TyLayout { + details: tcx.intern_layout(layout), + ty: discr.value.to_ty(tcx) + }); + } + } } ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) | ty::TyInfer(_) | ty::TyError => { bug!("TyLayout::field_type: unexpected type `{}`", self.ty) } + }) + } + + /// Returns true if the layout corresponds to an unsized type. + pub fn is_unsized(&self) -> bool { + self.abi.is_unsized() + } + + /// Returns true if the fields of the layout are packed. + pub fn is_packed(&self) -> bool { + self.abi.is_packed() + } + + /// Returns true if the type is a ZST and not unsized. + pub fn is_zst(&self) -> bool { + match self.abi { + Abi::Uninhabited => true, + Abi::Scalar(_) | Abi::ScalarPair(..) => false, + Abi::Vector => self.size.bytes() == 0, + Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0 } } - pub fn field>(&self, - cx: C, - i: usize) - -> C::TyLayout { - cx.layout_of(cx.normalize_projections(self.field_type(cx, i))) + pub fn size_and_align(&self) -> (Size, Align) { + (self.size, self.align) + } + + /// Find the offset of a niche leaf field, starting from + /// the given type and recursing through aggregates, which + /// has at least `count` consecutive invalid values. + /// The tuple is `(offset, scalar, niche_value)`. + // FIXME(eddyb) traverse already optimized enums. + fn find_niche(&self, cx: C, count: u128) + -> Result, LayoutError<'tcx>> + where C: LayoutOf, TyLayout = Result>> + + HasTyCtxt<'tcx> + { + let scalar_component = |scalar: &Scalar, offset| { + let Scalar { value, valid_range: ref v } = *scalar; + + let bits = value.size(cx).bits(); + assert!(bits <= 128); + let max_value = !0u128 >> (128 - bits); + + // Find out how many values are outside the valid range. + let niches = if v.start <= v.end { + v.start + (max_value - v.end) + } else { + v.start - v.end - 1 + }; + + // Give up if we can't fit `count` consecutive niches. 
+ if count > niches { + return None; + } + + let niche_start = v.end.wrapping_add(1) & max_value; + let niche_end = v.end.wrapping_add(count) & max_value; + Some((offset, Scalar { + value, + valid_range: v.start..=niche_end + }, niche_start)) + }; + + match self.abi { + Abi::Scalar(ref scalar) => { + return Ok(scalar_component(scalar, Size::from_bytes(0))); + } + Abi::ScalarPair(ref a, ref b) => { + return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| { + scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx))) + })); + } + _ => {} + } + + // Perhaps one of the fields is non-zero, let's recurse and find out. + if let FieldPlacement::Union(_) = self.fields { + // Only Rust enums have safe-to-inspect fields + // (a discriminant), other unions are unsafe. + if let Variants::Single { .. } = self.variants { + return Ok(None); + } + } + if let FieldPlacement::Array { .. } = self.fields { + if self.fields.count() > 0 { + return self.field(cx, 0)?.find_niche(cx, count); + } + } + for i in 0..self.fields.count() { + let r = self.field(cx, i)?.find_niche(cx, count)?; + if let Some((offset, scalar, niche_value)) = r { + let offset = self.fields.offset(i) + offset; + return Ok(Some((offset, scalar, niche_value))); + } + } + Ok(None) } } -impl<'gcx> HashStable> for Layout -{ +impl<'gcx> HashStable> for Variants { fn hash_stable(&self, hcx: &mut StableHashingContext<'gcx>, hasher: &mut StableHasher) { - use ty::layout::Layout::*; + use ty::layout::Variants::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { - Scalar { value, non_zero } => { - value.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); - } - Vector { element, count } => { - element.hash_stable(hcx, hasher); - count.hash_stable(hcx, hasher); - } - Array { sized, align, primitive_align, element_size, count } => { - sized.hash_stable(hcx, hasher); - align.hash_stable(hcx, hasher); - primitive_align.hash_stable(hcx, hasher); - element_size.hash_stable(hcx, hasher); - count.hash_stable(hcx, hasher); + Single { index } => { + index.hash_stable(hcx, hasher); } - FatPointer { ref metadata, non_zero } => { - metadata.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); - } - CEnum { discr, signed, non_zero, min, max } => { + Tagged { + ref discr, + ref variants, + } => { discr.hash_stable(hcx, hasher); - signed.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); - min.hash_stable(hcx, hasher); - max.hash_stable(hcx, hasher); - } - Univariant { ref variant, non_zero } => { - variant.hash_stable(hcx, hasher); - non_zero.hash_stable(hcx, hasher); - } - UntaggedUnion { ref variants } => { variants.hash_stable(hcx, hasher); } - General { discr, ref variants, size, align, primitive_align } => { - discr.hash_stable(hcx, hasher); + NicheFilling { + dataful_variant, + niche_variants: RangeInclusive { start, end }, + ref niche, + niche_start, + ref variants, + } => { + dataful_variant.hash_stable(hcx, hasher); + start.hash_stable(hcx, hasher); + end.hash_stable(hcx, hasher); + niche.hash_stable(hcx, hasher); + niche_start.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); - size.hash_stable(hcx, hasher); - align.hash_stable(hcx, hasher); - primitive_align.hash_stable(hcx, hasher); } - RawNullablePointer { nndiscr, ref value } => { - nndiscr.hash_stable(hcx, hasher); + } + } +} + +impl<'gcx> HashStable> for FieldPlacement { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + use ty::layout::FieldPlacement::*; + 
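`find_niche` above treats a scalar's `valid_range` as a wrapping inclusive range, counts how many bit patterns fall outside it, and reserves new values directly after the range's end. A standalone model of that arithmetic:

    // Sketch: a scalar of `bits` bits whose valid values form the wrapping
    // inclusive range start..=end has some number of spare ("niche") values;
    // reserving `count` of them yields where they begin and the new range end.
    fn reserve_niche(bits: u32, start: u128, end: u128, count: u128) -> Option<(u128, u128)> {
        assert!(bits >= 1 && bits <= 128);
        let max_value = !0u128 >> (128 - bits);

        // How many values fall outside start..=end (interpreted as wrapping)?
        let niches = if start <= end {
            start + (max_value - end)
        } else {
            start - end - 1
        };
        if count > niches {
            return None;
        }

        // The reserved values directly follow `end`, wrapping around zero.
        let niche_start = end.wrapping_add(1) & max_value;
        let new_end = end.wrapping_add(count) & max_value;
        Some((niche_start, new_end))
    }

    fn main() {
        // `bool` occupies 0..=1 of a byte: 254 niches, the first one is 2
        // (this is where `Option<bool>::None` ends up).
        assert_eq!(reserve_niche(8, 0, 1, 1), Some((2, 2)));
        // A NonZero-style byte (valid 1..=255) has exactly one niche: 0.
        assert_eq!(reserve_niche(8, 1, 255, 1), Some((0, 0)));
        // A full `u8` (0..=255) has no niche left to give.
        assert_eq!(reserve_niche(8, 0, 255, 1), None);
    }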
mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Union(count) => { + count.hash_stable(hcx, hasher); + } + Array { count, stride } => { + count.hash_stable(hcx, hasher); + stride.hash_stable(hcx, hasher); + } + Arbitrary { ref offsets, ref memory_index } => { + offsets.hash_stable(hcx, hasher); + memory_index.hash_stable(hcx, hasher); + } + } + } +} + +impl<'gcx> HashStable> for Abi { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + use ty::layout::Abi::*; + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Uninhabited => {} + Scalar(ref value) => { value.hash_stable(hcx, hasher); } - StructWrappedNullablePointer { - nndiscr, - ref nonnull, - ref discrfield, - ref discrfield_source - } => { - nndiscr.hash_stable(hcx, hasher); - nonnull.hash_stable(hcx, hasher); - discrfield.hash_stable(hcx, hasher); - discrfield_source.hash_stable(hcx, hasher); + ScalarPair(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + } + Vector => {} + Aggregate { packed, sized } => { + packed.hash_stable(hcx, hasher); + sized.hash_stable(hcx, hasher); } } } } +impl<'gcx> HashStable> for Scalar { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'gcx>, + hasher: &mut StableHasher) { + let Scalar { value, valid_range: RangeInclusive { start, end } } = *self; + value.hash_stable(hcx, hasher); + start.hash_stable(hcx, hasher); + end.hash_stable(hcx, hasher); + } +} + +impl_stable_hash_for!(struct ::ty::layout::LayoutDetails { + variants, + fields, + abi, + size, + align +}); + impl_stable_hash_for!(enum ::ty::layout::Integer { - I1, I8, I16, I32, @@ -2386,7 +2449,7 @@ fn hash_stable(&self, }); impl_stable_hash_for!(enum ::ty::layout::Primitive { - Int(integer), + Int(integer, signed), F32, F64, Pointer @@ -2415,20 +2478,3 @@ fn hash_stable(&self, } } } - -impl_stable_hash_for!(struct ::ty::layout::Struct { - align, - primitive_align, - packed, - sized, - offsets, - memory_index, - min_size -}); - -impl_stable_hash_for!(struct ::ty::layout::Union { - align, - primitive_align, - min_size, - packed -}); diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs index 320f65148498712c3b5cbfdcc87cf914f845e54b..2f648e8d3ff82c68e612278ec1c303b7c79e7a96 100644 --- a/src/librustc/ty/maps/mod.rs +++ b/src/librustc/ty/maps/mod.rs @@ -34,7 +34,6 @@ use traits::Vtable; use traits::specialization_graph; use ty::{self, CrateInherentImpls, Ty, TyCtxt}; -use ty::layout::{Layout, LayoutError}; use ty::steal::Steal; use ty::subst::Substs; use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet}; @@ -265,7 +264,8 @@ [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx Layout, LayoutError<'tcx>>, + -> Result<&'tcx ty::layout::LayoutDetails, + ty::layout::LayoutError<'tcx>>, [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) -> Rc>, diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index a584f2ce1919a2218a70ef7f48d64dfa597327c7..48ec92a255b4c66275750c47d4992b562b994c84 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -1674,11 +1674,6 @@ pub fn all_fields<'s>(&'s self) -> impl Iterator { self.variants.iter().flat_map(|v| v.fields.iter()) } - #[inline] - pub fn is_univariant(&self) -> bool { - self.variants.len() == 1 - } - pub fn is_payloadfree(&self) -> bool { 
!self.variants.is_empty() && self.variants.iter().all(|v| v.fields.is_empty()) @@ -2622,9 +2617,10 @@ fn original_crate_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } pub fn provide(providers: &mut ty::maps::Providers) { - util::provide(providers); context::provide(providers); erase_regions::provide(providers); + layout::provide(providers); + util::provide(providers); *providers = ty::maps::Providers { associated_item, associated_item_def_ids, diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index a0219f2f95b8453c62c7b0f37c39ca8dafe6434b..23dd3f1bc2bba36e55bb9e22f10344afa538312f 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -19,7 +19,6 @@ use traits::{self, Reveal}; use ty::{self, Ty, TyCtxt, TypeFoldable}; use ty::fold::TypeVisitor; -use ty::layout::{Layout, LayoutError}; use ty::subst::{Subst, Kind}; use ty::TypeVariants::*; use util::common::ErrorReported; @@ -852,30 +851,6 @@ pub fn needs_drop(&'tcx self, tcx.needs_drop_raw(param_env.and(self)) } - /// Computes the layout of a type. Note that this implicitly - /// executes in "reveal all" mode. - #[inline] - pub fn layout<'lcx>(&'tcx self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>) - -> Result<&'tcx Layout, LayoutError<'tcx>> { - let ty = tcx.erase_regions(&self); - let layout = tcx.layout_raw(param_env.reveal_all().and(ty)); - - // NB: This recording is normally disabled; when enabled, it - // can however trigger recursive invocations of `layout()`. - // Therefore, we execute it *after* the main query has - // completed, to avoid problems around recursive structures - // and the like. (Admitedly, I wasn't able to reproduce a problem - // here, but it seems like the right thing to do. -nmatsakis) - if let Ok(l) = layout { - Layout::record_layout_for_printing(tcx, ty, param_env, l); - } - - layout - } - - /// Check whether a type is representable. This means it cannot contain unboxed /// structural recursion. This check is needed for structs and enums. 
pub fn is_representable(&'tcx self, @@ -1184,26 +1159,6 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx Layout, LayoutError<'tcx>> -{ - let (param_env, ty) = query.into_parts(); - - let rec_limit = tcx.sess.recursion_limit.get(); - let depth = tcx.layout_depth.get(); - if depth > rec_limit { - tcx.sess.fatal( - &format!("overflow representing the type `{}`", ty)); - } - - tcx.layout_depth.set(depth+1); - let layout = Layout::compute_uncached(tcx, param_env, ty); - tcx.layout_depth.set(depth); - - layout -} - pub enum ExplicitSelf<'tcx> { ByValue, ByReference(ty::Region<'tcx>, hir::Mutability), @@ -1262,7 +1217,6 @@ pub fn provide(providers: &mut ty::maps::Providers) { is_sized_raw, is_freeze_raw, needs_drop_raw, - layout_raw, ..*providers }; } diff --git a/src/librustc_const_eval/_match.rs b/src/librustc_const_eval/_match.rs index 6ebe3c679667f0a7afffa15811ae4cdbe3359af5..33d9bfa6e6b9c21b44d31f2ec37afbebc941832a 100644 --- a/src/librustc_const_eval/_match.rs +++ b/src/librustc_const_eval/_match.rs @@ -255,7 +255,7 @@ fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> usize { match self { &Variant(vid) => adt.variant_index_with_id(vid), &Single => { - assert_eq!(adt.variants.len(), 1); + assert!(!adt.is_enum()); 0 } _ => bug!("bad constructor {:?} for adt {:?}", self, adt) @@ -356,7 +356,7 @@ fn apply_constructor<'a>( }).collect(); if let ty::TyAdt(adt, substs) = ty.sty { - if adt.variants.len() > 1 { + if adt.is_enum() { PatternKind::Variant { adt_def: adt, substs, @@ -444,7 +444,7 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, (0..pcx.max_slice_length+1).map(|length| Slice(length)).collect() } } - ty::TyAdt(def, substs) if def.is_enum() && def.variants.len() != 1 => { + ty::TyAdt(def, substs) if def.is_enum() => { def.variants.iter() .filter(|v| !cx.is_variant_uninhabited(v, substs)) .map(|v| Variant(v.did)) diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs index 657156902b5c11974a5d3020341539f9e78db1f5..a548c1df16e28987e1e5d5c94e716935ef41c234 100644 --- a/src/librustc_const_eval/eval.rs +++ b/src/librustc_const_eval/eval.rs @@ -17,6 +17,7 @@ use rustc::hir::def::{Def, CtorKind}; use rustc::hir::def_id::DefId; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::LayoutOf; use rustc::ty::maps::Providers; use rustc::ty::util::IntTypeExt; use rustc::ty::subst::{Substs, Subst}; @@ -313,18 +314,18 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>, if tcx.fn_sig(def_id).abi() == Abi::RustIntrinsic { let layout_of = |ty: Ty<'tcx>| { let ty = tcx.erase_regions(&ty); - tcx.at(e.span).layout_raw(cx.param_env.reveal_all().and(ty)).map_err(|err| { + (tcx.at(e.span), cx.param_env).layout_of(ty).map_err(|err| { ConstEvalErr { span: e.span, kind: LayoutError(err) } }) }; match &tcx.item_name(def_id)[..] 
{ "size_of" => { - let size = layout_of(substs.type_at(0))?.size(tcx).bytes(); + let size = layout_of(substs.type_at(0))?.size.bytes(); return Ok(mk_const(Integral(Usize(ConstUsize::new(size, tcx.sess.target.usize_ty).unwrap())))); } "min_align_of" => { - let align = layout_of(substs.type_at(0))?.align(tcx).abi(); + let align = layout_of(substs.type_at(0))?.align.abi(); return Ok(mk_const(Integral(Usize(ConstUsize::new(align, tcx.sess.target.usize_ty).unwrap())))); } diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs index d7a16e9d2fc7565ba9b32949951cb37c6bf436a7..cfbb9623f7dc9ddf29b6f1e44fdf08a5c1dce298 100644 --- a/src/librustc_const_eval/pattern.rs +++ b/src/librustc_const_eval/pattern.rs @@ -150,7 +150,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { Some(&adt_def.variants[variant_index]) } _ => if let ty::TyAdt(adt, _) = self.ty.sty { - if adt.is_univariant() { + if !adt.is_enum() { Some(&adt.variants[0]) } else { None @@ -598,7 +598,7 @@ fn lower_variant_or_leaf( Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => { let enum_id = self.tcx.parent_def_id(variant_id).unwrap(); let adt_def = self.tcx.adt_def(enum_id); - if adt_def.variants.len() > 1 { + if adt_def.is_enum() { let substs = match ty.sty { ty::TyAdt(_, substs) | ty::TyFnDef(_, substs) => substs, diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index 8f08987505b945441110b8c6d9d7b52fe009ac5a..1356574f646aa73b2ad026b60165661c39e21407 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -13,7 +13,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, AdtKind, Ty, TyCtxt}; -use rustc::ty::layout::{Layout, Primitive}; +use rustc::ty::layout::{self, LayoutOf}; use middle::const_val::ConstVal; use rustc_const_eval::ConstContext; use util::nodemap::FxHashSet; @@ -748,25 +748,23 @@ fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { // sizes only make sense for non-generic types let item_def_id = cx.tcx.hir.local_def_id(it.id); let t = cx.tcx.type_of(item_def_id); - let param_env = cx.param_env.reveal_all(); let ty = cx.tcx.erase_regions(&t); - let layout = ty.layout(cx.tcx, param_env).unwrap_or_else(|e| { + let layout = cx.layout_of(ty).unwrap_or_else(|e| { bug!("failed to get layout for `{}`: {}", t, e) }); - if let Layout::General { ref variants, ref size, discr, .. } = *layout { - let discr_size = Primitive::Int(discr).size(cx.tcx).bytes(); + if let layout::Variants::Tagged { ref variants, ref discr, .. 
} = layout.variants { + let discr_size = discr.value.size(cx.tcx).bytes(); debug!("enum `{}` is {} bytes large with layout:\n{:#?}", - t, size.bytes(), layout); + t, layout.size.bytes(), layout); let (largest, slargest, largest_index) = enum_definition.variants .iter() .zip(variants) .map(|(variant, variant_layout)| { // Subtract the size of the enum discriminant - let bytes = variant_layout.min_size - .bytes() + let bytes = variant_layout.size.bytes() .saturating_sub(discr_size); debug!("- variant `{}` is {} bytes large", variant.node.name, bytes); diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 24c3963fbc4b31b9388376cc6af18496ca9e0714..f8c71d4825513f30daa1b2b51e1b9b95da64a9ce 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -575,8 +575,6 @@ pub fn LLVMStructTypeInContext(C: ContextRef, ElementCount: c_uint, Packed: Bool) -> TypeRef; - pub fn LLVMCountStructElementTypes(StructTy: TypeRef) -> c_uint; - pub fn LLVMGetStructElementTypes(StructTy: TypeRef, Dest: *mut TypeRef); pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool; // Operations on array, pointer, and vector types (sequence types) @@ -585,7 +583,6 @@ pub fn LLVMStructTypeInContext(C: ContextRef, pub fn LLVMVectorType(ElementType: TypeRef, ElementCount: c_uint) -> TypeRef; pub fn LLVMGetElementType(Ty: TypeRef) -> TypeRef; - pub fn LLVMGetArrayLength(ArrayTy: TypeRef) -> c_uint; pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint; // Operations on other types @@ -611,10 +608,7 @@ pub fn LLVMStructTypeInContext(C: ContextRef, pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef; pub fn LLVMConstICmp(Pred: IntPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef; pub fn LLVMConstFCmp(Pred: RealPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef; - // only for isize/vector pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef; - pub fn LLVMIsNull(Val: ValueRef) -> Bool; - pub fn LLVMIsUndef(Val: ValueRef) -> Bool; // Operations on metadata pub fn LLVMMDStringInContext(C: ContextRef, Str: *const c_char, SLen: c_uint) -> ValueRef; @@ -736,7 +730,9 @@ pub fn LLVMRustGetOrInsertFunction(M: ModuleRef, FunctionTy: TypeRef) -> ValueRef; pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint); + pub fn LLVMRustAddAlignmentAttr(Fn: ValueRef, index: c_uint, bytes: u32); pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: ValueRef, index: c_uint, bytes: u64); pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute); pub fn LLVMRustAddFunctionAttrStringValue(Fn: ValueRef, index: c_uint, @@ -766,7 +762,11 @@ pub fn LLVMAppendBasicBlockInContext(C: ContextRef, // Operations on call sites pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint); pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute); + pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u32); pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: ValueRef, + index: c_uint, + bytes: u64); // Operations on load/store instructions (only) pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool); @@ -1205,15 +1205,13 @@ pub fn LLVMBuildPtrDiff(B: BuilderRef, pub fn LLVMRustBuildAtomicLoad(B: BuilderRef, PointerVal: ValueRef, Name: *const c_char, - Order: AtomicOrdering, - Alignment: c_uint) + Order: AtomicOrdering) -> ValueRef; pub fn LLVMRustBuildAtomicStore(B: BuilderRef, Val: ValueRef, 
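The lint above charges each variant its total size minus the shared tag and compares the largest payload against the runner-up. A minimal sketch of that accounting, with plain integers and invented helper names:

    // Sketch: per-variant payload sizes after subtracting the shared tag.
    fn variant_payloads(variant_sizes: &[u64], discr_size: u64) -> Vec<u64> {
        variant_sizes
            .iter()
            .map(|s| s.saturating_sub(discr_size))
            .collect()
    }

    fn main() {
        // Tag of 1 byte; variants of 1, 9 and 101 bytes total.
        let payloads = variant_payloads(&[1, 9, 101], 1);
        assert_eq!(payloads, vec![0, 8, 100]);
        // The largest payload (100 bytes) dwarfs the runner-up (8 bytes),
        // which is the situation the lint above reports.
        let mut sorted = payloads.clone();
        sorted.sort_unstable_by(|a, b| b.cmp(a));
        assert_eq!((sorted[0], sorted[1]), (100, 8));
    }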
Ptr: ValueRef, - Order: AtomicOrdering, - Alignment: c_uint) + Order: AtomicOrdering) -> ValueRef; pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef, @@ -1247,23 +1245,6 @@ pub fn LLVMRustBuildAtomicFence(B: BuilderRef, /// Creates target data from a target layout string. pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef; - /// Number of bytes clobbered when doing a Store to *T. - pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - - /// Distance between successive elements in an array of T. Includes ABI padding. - pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - - /// Returns the preferred alignment of a type. - pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - /// Returns the minimum alignment of a type. - pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - - /// Computes the byte offset of the indexed struct element for a - /// target. - pub fn LLVMOffsetOfElement(TD: TargetDataRef, - StructTy: TypeRef, - Element: c_uint) - -> c_ulonglong; /// Disposes target data. pub fn LLVMDisposeTargetData(TD: TargetDataRef); @@ -1341,11 +1322,6 @@ pub fn LLVMStructSetBody(StructTy: TypeRef, ElementCount: c_uint, Packed: Bool); - pub fn LLVMConstNamedStruct(S: TypeRef, - ConstantVals: *const ValueRef, - Count: c_uint) - -> ValueRef; - /// Enables LLVM debug output. pub fn LLVMRustSetDebug(Enabled: c_int); diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs index 5ccce8de7063995ccb2e9378a83f7a96bae4234c..592bd62056455534cfcbb331abb1aa555a2da635 100644 --- a/src/librustc_llvm/lib.rs +++ b/src/librustc_llvm/lib.rs @@ -74,22 +74,19 @@ pub fn AddFunctionAttrStringValue(llfn: ValueRef, } } -#[repr(C)] #[derive(Copy, Clone)] pub enum AttributePlace { + ReturnValue, Argument(u32), Function, } impl AttributePlace { - pub fn ReturnValue() -> Self { - AttributePlace::Argument(0) - } - pub fn as_uint(self) -> c_uint { match self { + AttributePlace::ReturnValue => 0, + AttributePlace::Argument(i) => 1 + i, AttributePlace::Function => !0, - AttributePlace::Argument(i) => i, } } } diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 9b3f16f1ab4326b06369a1301008912449f1bb50..a7599f19244c215df39ef654b64adafff34e81f0 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -98,19 +98,16 @@ fn simplify_match_pair<'pat>(&mut self, } PatternKind::Variant { adt_def, substs, variant_index, ref subpatterns } => { - if self.hir.tcx().sess.features.borrow().never_type { - let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| { - i == variant_index || { - self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs) - } - }); - if irrefutable { - let lvalue = match_pair.lvalue.downcast(adt_def, variant_index); - candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns)); - Ok(()) - } else { - Err(match_pair) + let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| { + i == variant_index || { + self.hir.tcx().sess.features.borrow().never_type && + self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs) } + }); + if irrefutable { + let lvalue = match_pair.lvalue.downcast(adt_def, variant_index); + candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns)); + Ok(()) } else { Err(match_pair) } diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index 
1cf35af3a9e1ba796da7415b18b183a8e7b93d5d..02a7bc83f6ee822cece374180c16a62a9ea554c9 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -39,7 +39,7 @@ pub fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> { span: match_pair.pattern.span, kind: TestKind::Switch { adt_def: adt_def.clone(), - variants: BitVector::new(self.hir.num_variants(adt_def)), + variants: BitVector::new(adt_def.variants.len()), }, } } @@ -184,7 +184,7 @@ pub fn perform_test(&mut self, match test.kind { TestKind::Switch { adt_def, ref variants } => { // Variants is a BitVec of indexes into adt_def.variants. - let num_enum_variants = self.hir.num_variants(adt_def); + let num_enum_variants = adt_def.variants.len(); let used_variants = variants.count(); let mut otherwise_block = None; let mut target_blocks = Vec::with_capacity(num_enum_variants); diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index 50264238aacb21850cc41c53ddf1835a92a77375..b1f4b849b8928f4ec09c1cb9bd4c49f905733532 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -213,10 +213,6 @@ pub fn trait_method(&mut self, bug!("found no method `{}` in `{:?}`", method_name, trait_def_id); } - pub fn num_variants(&mut self, adt_def: &ty::AdtDef) -> usize { - adt_def.variants.len() - } - pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: usize) -> Vec { (0..adt_def.variants[variant_index].fields.len()) .map(Field::new) diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs index 61b4716c56409b20d8afc66b1f9d1c6e09b020d4..e2ecd4839fb483f0d8f202711a05f131ff182562 100644 --- a/src/librustc_mir/transform/deaggregator.rs +++ b/src/librustc_mir/transform/deaggregator.rs @@ -67,7 +67,7 @@ fn run_pass<'a, 'tcx>(&self, let ty = variant_def.fields[i].ty(tcx, substs); let rhs = Rvalue::Use(op.clone()); - let lhs_cast = if adt_def.variants.len() > 1 { + let lhs_cast = if adt_def.is_enum() { Lvalue::Projection(Box::new(LvalueProjection { base: lhs.clone(), elem: ProjectionElem::Downcast(adt_def, variant), @@ -89,7 +89,7 @@ fn run_pass<'a, 'tcx>(&self, } // if the aggregate was an enum, we need to set the discriminant - if adt_def.variants.len() > 1 { + if adt_def.is_enum() { let set_discriminant = Statement { kind: StatementKind::SetDiscriminant { lvalue: lhs.clone(), diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index 628a8161615e3da28bdfda8a2c0d24f875916303..4b7856f857b778b10637d570279343e41f05936a 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -19,6 +19,7 @@ use rustc::mir::*; use rustc::mir::visit::*; use rustc::ty::{self, Instance, Ty, TyCtxt, TypeFoldable}; +use rustc::ty::layout::LayoutOf; use rustc::ty::subst::{Subst,Substs}; use std::collections::VecDeque; @@ -625,9 +626,7 @@ fn create_temp_if_necessary( fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> Option { - ty.layout(tcx, param_env).ok().map(|layout| { - layout.size(&tcx.data_layout).bytes() - }) + (tcx, param_env).layout_of(ty).ok().map(|layout| layout.size.bytes()) } fn subst_and_normalize<'a, 'tcx: 'a>( diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs index b70d0fb9c2c5d8a969848e4bdb63298afed2cbdc..cc6b702090314fc59fbface4ebe8e044ce818501 100644 --- a/src/librustc_mir/transform/type_check.rs +++ 
b/src/librustc_mir/transform/type_check.rs @@ -344,7 +344,7 @@ fn field_ty( variant_index, } => (&adt_def.variants[variant_index], substs), LvalueTy::Ty { ty } => match ty.sty { - ty::TyAdt(adt_def, substs) if adt_def.is_univariant() => { + ty::TyAdt(adt_def, substs) if !adt_def.is_enum() => { (&adt_def.variants[0], substs) } ty::TyClosure(def_id, substs) => { diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index 3b9772079adb9c90546cdec00bb07b5137010d8f..1852712a083751a02410e93fb3a3cbfe9a027590 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -384,7 +384,7 @@ fn open_drop_for_adt_contents(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) -> (BasicBlock, Unwind) { let (succ, unwind) = self.drop_ladder_bottom(); - if adt.variants.len() == 1 { + if !adt.is_enum() { let fields = self.move_paths_for_fields( self.lvalue, self.path, diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 6df40c34ec54ae7b820434e42a76ebf662c80d00..54828044de670a4636dd0db6b4ead816535aa85f 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -11,7 +11,7 @@ use llvm::{self, ValueRef, AttributePlace}; use base; use builder::Builder; -use common::{instance_ty, ty_fn_sig, type_is_fat_ptr, C_usize}; +use common::{instance_ty, ty_fn_sig, C_usize}; use context::CrateContext; use cabi_x86; use cabi_x86_64; @@ -30,31 +30,34 @@ use cabi_nvptx; use cabi_nvptx64; use cabi_hexagon; -use machine::llalign_of_min; +use mir::lvalue::{Alignment, LvalueRef}; +use mir::operand::OperandValue; use type_::Type; -use type_of; +use type_of::{LayoutLlvmExt, PointerKind}; -use rustc::hir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size}; -use rustc_back::PanicStrategy; +use rustc::ty::layout::{self, Align, Size, TyLayout}; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; use libc::c_uint; -use std::cmp; -use std::iter; +use std::{cmp, iter}; pub use syntax::abi::Abi; pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; -#[derive(Clone, Copy, PartialEq, Debug)] -enum ArgKind { - /// Pass the argument directly using the normal converted - /// LLVM type or by coercing to another specified type - Direct, - /// Pass the argument indirectly via a hidden pointer - Indirect, - /// Ignore the argument (useful for empty struct) +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum PassMode { + /// Ignore the argument (useful for empty struct). Ignore, + /// Pass the argument directly. + Direct(ArgAttributes), + /// Pass a pair's elements directly in two arguments. + Pair(ArgAttributes, ArgAttributes), + /// Pass the argument after casting it, to either + /// a single uniform or a pair of registers. + Cast(CastTarget), + /// Pass the argument indirectly via a hidden pointer. + Indirect(ArgAttributes), } // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest @@ -96,20 +99,24 @@ fn for_each_kind(&self, mut f: F) where F: FnMut(llvm::Attribute) { /// A compact representation of LLVM attributes (at least those relevant for this module) /// that can be manipulated without interacting with LLVM's Attribute machinery. 
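The new `PassMode` introduced above replaces the old three-state `ArgKind` and records *how* each argument travels: skipped entirely, as one immediate, as two immediates, through a register cast, or behind a hidden pointer. A standalone sketch of how a mode might be chosen from a simplified layout; the thresholds are illustrative and the `Cast` case for small aggregates is folded into `Direct` here, unlike the real code:

    #[derive(Debug, PartialEq)]
    enum PassMode {
        Ignore,   // zero-sized: nothing is passed at all
        Direct,   // a single scalar in one LLVM value
        Pair,     // e.g. fat pointers: two scalars, two LLVM values
        Indirect, // large aggregates: the caller passes a pointer
    }

    #[derive(Copy, Clone)]
    enum Abi {
        Scalar,
        ScalarPair,
        Aggregate { size: u64 },
    }

    fn pass_mode(abi: Abi, pointer_size: u64) -> PassMode {
        match abi {
            Abi::Aggregate { size: 0 } => PassMode::Ignore,
            Abi::Scalar => PassMode::Direct,
            Abi::ScalarPair => PassMode::Pair,
            Abi::Aggregate { size } if size <= pointer_size => PassMode::Direct,
            Abi::Aggregate { .. } => PassMode::Indirect,
        }
    }

    fn main() {
        assert_eq!(pass_mode(Abi::Aggregate { size: 0 }, 8), PassMode::Ignore);
        assert_eq!(pass_mode(Abi::Aggregate { size: 24 }, 8), PassMode::Indirect);
    }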
-#[derive(Copy, Clone, Debug, Default)] +#[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct ArgAttributes { regular: ArgAttribute, - dereferenceable_bytes: u64, + pointee_size: Size, + pointee_align: Option } impl ArgAttributes { - pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { - self.regular = self.regular | attr; - self + fn new() -> Self { + ArgAttributes { + regular: ArgAttribute::default(), + pointee_size: Size::from_bytes(0), + pointee_align: None, + } } - pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self { - self.dereferenceable_bytes = bytes; + pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { + self.regular = self.regular | attr; self } @@ -118,24 +125,52 @@ pub fn contains(&self, attr: ArgAttribute) -> bool { } pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { + let mut regular = self.regular; unsafe { - self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); - if self.dereferenceable_bytes != 0 { - llvm::LLVMRustAddDereferenceableAttr(llfn, - idx.as_uint(), - self.dereferenceable_bytes); + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + llvm::LLVMRustAddDereferenceableAttr(llfn, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentAttr(llfn, + idx.as_uint(), + align.abi() as u32); + } + regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); } } pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) { + let mut regular = self.regular; unsafe { - self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); - if self.dereferenceable_bytes != 0 { - llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, - idx.as_uint(), - self.dereferenceable_bytes); + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; + } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, + idx.as_uint(), + align.abi() as u32); } + regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); } } } @@ -174,7 +209,32 @@ impl Reg { } impl Reg { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn align(&self, ccx: &CrateContext) -> Align { + let dl = ccx.data_layout(); + match self.kind { + RegKind::Integer => { + match self.size.bits() { + 1 => dl.i1_align, + 2...8 => dl.i8_align, + 9...16 => dl.i16_align, + 17...32 => dl.i32_align, + 33...64 => dl.i64_align, + 65...128 => dl.i128_align, + _ => bug!("unsupported integer: {:?}", self) + } + } + RegKind::Float => { + match self.size.bits() { + 32 => dl.f32_align, + 64 => dl.f64_align, + _ => bug!("unsupported float: {:?}", self) + } + } + RegKind::Vector => dl.vector_align(self.size) + } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { match self.kind { RegKind::Integer => Type::ix(ccx, self.size.bits()), RegKind::Float => { @@ -193,7 +253,7 @@ fn llvm_type(&self, ccx: &CrateContext) -> Type { /// An argument passed entirely registers with the /// same kind (e.g. HFA / HVA on PPC64 and AArch64). 
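The reworked `ArgAttributes` above stops storing a raw `dereferenceable_bytes` count and instead keeps `pointee_size`/`pointee_align`, deciding at application time whether to emit `dereferenceable` (pointer proven non-null) or the weaker `dereferenceable_or_null`. A sketch of just that decision, with strings standing in for the LLVM attribute calls (the attribute names are real LLVM parameter attributes; the `emit` function is a stand-in):

    #[derive(Copy, Clone, PartialEq)]
    enum ArgAttribute { NonNull, NoAlias }

    struct ArgAttributes {
        regular: Vec<ArgAttribute>,
        pointee_size: u64,          // bytes known dereferenceable
        pointee_align: Option<u64>, // ABI alignment of the pointee, if known
    }

    fn emit(attrs: &ArgAttributes) -> Vec<String> {
        let mut out = Vec::new();
        if attrs.pointee_size != 0 {
            // `dereferenceable(N)` already implies non-null, so the hunk above
            // also drops the separate NonNull attribute in this case; without
            // a non-null guarantee we must weaken to `dereferenceable_or_null`.
            if attrs.regular.contains(&ArgAttribute::NonNull) {
                out.push(format!("dereferenceable({})", attrs.pointee_size));
            } else {
                out.push(format!("dereferenceable_or_null({})", attrs.pointee_size));
            }
        }
        if let Some(align) = attrs.pointee_align {
            out.push(format!("align {}", align));
        }
        out
    }

    fn main() {
        let boxed_ret = ArgAttributes {
            regular: vec![ArgAttribute::NonNull, ArgAttribute::NoAlias],
            pointee_size: 8,
            pointee_align: Some(8),
        };
        assert_eq!(emit(&boxed_ret),
                   vec!["dereferenceable(8)".to_string(), "align 8".to_string()]);
    }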
-#[derive(Copy, Clone)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Uniform { pub unit: Reg, @@ -216,7 +276,11 @@ fn from(unit: Reg) -> Uniform { } impl Uniform { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn align(&self, ccx: &CrateContext) -> Align { + self.unit.align(ccx) + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { let llunit = self.unit.llvm_type(ccx); if self.total <= self.unit.size { @@ -248,106 +312,62 @@ pub trait LayoutExt<'tcx> { impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { fn is_aggregate(&self) -> bool { - match *self.layout { - Layout::Scalar { .. } | - Layout::RawNullablePointer { .. } | - Layout::CEnum { .. } | - Layout::Vector { .. } => false, - - Layout::Array { .. } | - Layout::FatPointer { .. } | - Layout::Univariant { .. } | - Layout::UntaggedUnion { .. } | - Layout::General { .. } | - Layout::StructWrappedNullablePointer { .. } => true + match self.abi { + layout::Abi::Uninhabited | + layout::Abi::Scalar(_) | + layout::Abi::Vector => false, + layout::Abi::ScalarPair(..) | + layout::Abi::Aggregate { .. } => true } } fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option { - match *self.layout { - // The primitives for this algorithm. - Layout::Scalar { value, .. } | - Layout::RawNullablePointer { value, .. } => { - let kind = match value { - layout::Int(_) | + match self.abi { + layout::Abi::Uninhabited => None, + + // The primitive for this algorithm. + layout::Abi::Scalar(ref scalar) => { + let kind = match scalar.value { + layout::Int(..) | layout::Pointer => RegKind::Integer, layout::F32 | layout::F64 => RegKind::Float }; Some(Reg { kind, - size: self.size(ccx) + size: self.size }) } - Layout::CEnum { .. } => { - Some(Reg { - kind: RegKind::Integer, - size: self.size(ccx) - }) - } - - Layout::Vector { .. } => { + layout::Abi::Vector => { Some(Reg { kind: RegKind::Vector, - size: self.size(ccx) + size: self.size }) } - Layout::Array { count, .. } => { - if count > 0 { - self.field(ccx, 0).homogeneous_aggregate(ccx) - } else { - None - } - } - - Layout::Univariant { ref variant, .. } => { - let mut unaligned_offset = Size::from_bytes(0); + layout::Abi::ScalarPair(..) | + layout::Abi::Aggregate { .. } => { + let mut total = Size::from_bytes(0); let mut result = None; - for i in 0..self.field_count() { - if unaligned_offset != variant.offsets[i] { - return None; - } - - let field = self.field(ccx, i); - match (result, field.homogeneous_aggregate(ccx)) { - // The field itself must be a homogeneous aggregate. - (_, None) => return None, - // If this is the first field, record the unit. - (None, Some(unit)) => { - result = Some(unit); - } - // For all following fields, the unit must be the same. - (Some(prev_unit), Some(unit)) => { - if prev_unit != unit { - return None; - } + let is_union = match self.fields { + layout::FieldPlacement::Array { count, .. } => { + if count > 0 { + return self.field(ccx, 0).homogeneous_aggregate(ccx); + } else { + return None; } } + layout::FieldPlacement::Union(_) => true, + layout::FieldPlacement::Arbitrary { .. } => false + }; - // Keep track of the offset (without padding). - let size = field.size(ccx); - match unaligned_offset.checked_add(size, ccx) { - Some(offset) => unaligned_offset = offset, - None => return None + for i in 0..self.fields.count() { + if !is_union && total != self.fields.offset(i) { + return None; } - } - - // There needs to be no padding. - if unaligned_offset != self.size(ccx) { - None - } else { - result - } - } - Layout::UntaggedUnion { .. 
} => { - let mut max = Size::from_bytes(0); - let mut result = None; - - for i in 0..self.field_count() { let field = self.field(ccx, i); match (result, field.homogeneous_aggregate(ccx)) { // The field itself must be a homogeneous aggregate. @@ -365,28 +385,26 @@ fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option } // Keep track of the offset (without padding). - let size = field.size(ccx); - if size > max { - max = size; + let size = field.size; + if is_union { + total = cmp::max(total, size); + } else { + total += size; } } // There needs to be no padding. - if max != self.size(ccx) { + if total != self.size { None } else { result } } - - // Rust-specific types, which we can ignore for C ABIs. - Layout::FatPointer { .. } | - Layout::General { .. } | - Layout::StructWrappedNullablePointer { .. } => None } } } +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum CastTarget { Uniform(Uniform), Pair(Reg, Reg) @@ -405,7 +423,28 @@ fn from(uniform: Uniform) -> CastTarget { } impl CastTarget { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn size(&self, ccx: &CrateContext) -> Size { + match *self { + CastTarget::Uniform(u) => u.total, + CastTarget::Pair(a, b) => { + (a.size.abi_align(a.align(ccx)) + b.size) + .abi_align(self.align(ccx)) + } + } + } + + pub fn align(&self, ccx: &CrateContext) -> Align { + match *self { + CastTarget::Uniform(u) => u.align(ccx), + CastTarget::Pair(a, b) => { + ccx.data_layout().aggregate_align + .max(a.align(ccx)) + .max(b.align(ccx)) + } + } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { match *self { CastTarget::Uniform(u) => u.llvm_type(ccx), CastTarget::Pair(a, b) => { @@ -418,131 +457,118 @@ fn llvm_type(&self, ccx: &CrateContext) -> Type { } } -/// Information about how a specific C type -/// should be passed to or returned from a function -/// -/// This is borrowed from clang's ABIInfo.h -#[derive(Clone, Copy, Debug)] +/// Information about how to pass an argument to, +/// or return a value from, a function, under some ABI. +#[derive(Debug)] pub struct ArgType<'tcx> { - kind: ArgKind, pub layout: TyLayout<'tcx>, - /// Coerced LLVM Type - pub cast: Option, - /// Dummy argument, which is emitted before the real argument - pub pad: Option, - /// LLVM attributes of argument - pub attrs: ArgAttributes + + /// Dummy argument, which is emitted before the real argument. + pub pad: Option, + + pub mode: PassMode, } impl<'a, 'tcx> ArgType<'tcx> { fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> { ArgType { - kind: ArgKind::Direct, layout, - cast: None, pad: None, - attrs: ArgAttributes::default() + mode: PassMode::Direct(ArgAttributes::new()), } } - pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) { - assert_eq!(self.kind, ArgKind::Direct); - - // Wipe old attributes, likely not valid through indirection. - self.attrs = ArgAttributes::default(); + pub fn make_indirect(&mut self) { + assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new())); - let llarg_sz = self.layout.size(ccx).bytes(); + // Start with fresh attributes for the pointer. + let mut attrs = ArgAttributes::new(); // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. 
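The `homogeneous_aggregate` rewrite above now works off `Abi`/`FieldPlacement` instead of matching the old `Layout` variants, but the algorithm is unchanged: every field must reduce to the same register unit, struct fields must sit back-to-back, a union contributes the maximum of its field sizes, and any padding (interior or trailing) disqualifies the type. A self-contained sketch of that walk over a toy layout (types are simplified stand-ins):

    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    enum RegKind { Integer, Float, Vector }

    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    struct Reg { kind: RegKind, size: u64 }

    enum Layout {
        Scalar(Reg),
        Struct { fields: Vec<(u64, Layout)>, size: u64 }, // (offset, field)
        Union { fields: Vec<Layout>, size: u64 },
    }

    fn field_size(layout: &Layout) -> u64 {
        match layout {
            Layout::Scalar(reg) => reg.size,
            Layout::Struct { size, .. } | Layout::Union { size, .. } => *size,
        }
    }

    /// Returns the common register unit if `layout` is a homogeneous aggregate.
    fn homogeneous_aggregate(layout: &Layout) -> Option<Reg> {
        match layout {
            Layout::Scalar(reg) => Some(*reg),
            Layout::Struct { fields, size } => {
                let mut total = 0;
                let mut unit = None;
                for &(offset, ref field) in fields {
                    if offset != total {
                        return None; // interior padding
                    }
                    let field_unit = homogeneous_aggregate(field)?;
                    if *unit.get_or_insert(field_unit) != field_unit {
                        return None; // mixed register kinds or sizes
                    }
                    total += field_size(field);
                }
                if total == *size { unit } else { None } // no trailing padding
            }
            Layout::Union { fields, size } => {
                let mut total = 0;
                let mut unit = None;
                for field in fields {
                    let field_unit = homogeneous_aggregate(field)?;
                    if *unit.get_or_insert(field_unit) != field_unit {
                        return None;
                    }
                    total = total.max(field_size(field));
                }
                if total == *size { unit } else { None }
            }
        }
    }

    fn main() {
        // struct { f1: f64, f2: f64 } is a homogeneous float aggregate (unit f64).
        let f64_reg = Reg { kind: RegKind::Float, size: 8 };
        let s = Layout::Struct {
            fields: vec![(0, Layout::Scalar(f64_reg)), (8, Layout::Scalar(f64_reg))],
            size: 16,
        };
        assert_eq!(homogeneous_aggregate(&s), Some(f64_reg));
    }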
It's also // program-invisible so can't possibly capture - self.attrs.set(ArgAttribute::NoAlias) - .set(ArgAttribute::NoCapture) - .set_dereferenceable(llarg_sz); - - self.kind = ArgKind::Indirect; + attrs.set(ArgAttribute::NoAlias) + .set(ArgAttribute::NoCapture) + .set(ArgAttribute::NonNull); + attrs.pointee_size = self.layout.size; + // FIXME(eddyb) We should be doing this, but at least on + // i686-pc-windows-msvc, it results in wrong stack offsets. + // attrs.pointee_align = Some(self.layout.align); + + self.mode = PassMode::Indirect(attrs); } - pub fn ignore(&mut self) { - assert_eq!(self.kind, ArgKind::Direct); - self.kind = ArgKind::Ignore; + pub fn make_indirect_byval(&mut self) { + self.make_indirect(); + match self.mode { + PassMode::Indirect(ref mut attrs) => { + attrs.set(ArgAttribute::ByVal); + } + _ => bug!() + } } pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness - let (i, signed) = match *self.layout { - Layout::Scalar { value, .. } => { - match value { - layout::Int(i) => { - if self.layout.ty.is_integral() { - (i, self.layout.ty.is_signed()) + if let layout::Abi::Scalar(ref scalar) = self.layout.abi { + if let layout::Int(i, signed) = scalar.value { + if i.size().bits() < bits { + if let PassMode::Direct(ref mut attrs) = self.mode { + attrs.set(if signed { + ArgAttribute::SExt } else { - return; - } + ArgAttribute::ZExt + }); } - _ => return } } - - // Rust enum types that map onto C enums also need to follow - // the target ABI zero-/sign-extension rules. - Layout::CEnum { discr, signed, .. } => (discr, signed), - - _ => return - }; - - if i.size().bits() < bits { - self.attrs.set(if signed { - ArgAttribute::SExt - } else { - ArgAttribute::ZExt - }); } } - pub fn cast_to>(&mut self, ccx: &CrateContext, target: T) { - self.cast = Some(target.into().llvm_type(ccx)); + pub fn cast_to>(&mut self, target: T) { + assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new())); + self.mode = PassMode::Cast(target.into()); } - pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) { - self.pad = Some(reg.llvm_type(ccx)); + pub fn pad_with(&mut self, reg: Reg) { + self.pad = Some(reg); } pub fn is_indirect(&self) -> bool { - self.kind == ArgKind::Indirect + match self.mode { + PassMode::Indirect(_) => true, + _ => false + } } pub fn is_ignore(&self) -> bool { - self.kind == ArgKind::Ignore + self.mode == PassMode::Ignore } /// Get the LLVM type for an lvalue of the original Rust type of /// this argument/return, i.e. the result of `type_of::type_of`. pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { - type_of::type_of(ccx, self.layout.ty) + self.layout.llvm_type(ccx) } /// Store a direct/indirect value described by this ArgType into a /// lvalue for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. 
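`extend_integer_width_to` above now reads signedness directly from the scalar's `layout::Int(i, signed)` instead of special-casing C-like enums: an integer narrower than the width the ABI expects gets `sext` or `zext` so caller and callee agree on the upper bits. A small sketch of that rule with simplified inputs:

    #[derive(Debug, PartialEq)]
    enum Ext { None, Zext, Sext }

    /// `arg_bits`: width of the integer argument; `abi_bits`: width the ABI
    /// extends small integers to (commonly 32 on C ABIs).
    fn extend_integer_width_to(arg_bits: u64, signed: bool, abi_bits: u64) -> Ext {
        if arg_bits < abi_bits {
            if signed { Ext::Sext } else { Ext::Zext }
        } else {
            Ext::None
        }
    }

    fn main() {
        // An i8 argument extended to 32 bits keeps its sign.
        assert_eq!(extend_integer_width_to(8, true, 32), Ext::Sext);
        // A u16 is zero-extended; an i64 needs nothing.
        assert_eq!(extend_integer_width_to(16, false, 32), Ext::Zext);
        assert_eq!(extend_integer_width_to(64, true, 32), Ext::None);
    }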
- pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) { + pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: LvalueRef<'tcx>) { if self.is_ignore() { return; } let ccx = bcx.ccx; if self.is_indirect() { - let llsz = C_usize(ccx, self.layout.size(ccx).bytes()); - let llalign = self.layout.align(ccx).abi(); - base::call_memcpy(bcx, dst, val, llsz, llalign as u32); - } else if let Some(ty) = self.cast { + OperandValue::Ref(val, Alignment::AbiAligned).store(bcx, dst) + } else if let PassMode::Cast(cast) = self.mode { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bcx.pointercast(dst, ty.ptr_to()); - let llalign = self.layout.align(ccx).abi(); - bcx.store(val, cast_dst, Some(llalign as u32)); + let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(ccx).ptr_to()); + bcx.store(val, cast_dst, Some(self.layout.align)); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -559,40 +585,45 @@ pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let llscratch = bcx.alloca(ty, "abi_cast", None); - base::Lifetime::Start.call(bcx, llscratch); + let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", cast.align(ccx)); + let scratch_size = cast.size(ccx); + bcx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... bcx.store(val, llscratch, None); // ...and then memcpy it to the intended destination. base::call_memcpy(bcx, - bcx.pointercast(dst, Type::i8p(ccx)), + bcx.pointercast(dst.llval, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), - C_usize(ccx, self.layout.size(ccx).bytes()), - cmp::min(self.layout.align(ccx).abi() as u32, - llalign_of_min(ccx, ty))); + C_usize(ccx, self.layout.size.bytes()), + self.layout.align.min(cast.align(ccx))); - base::Lifetime::End.call(bcx, llscratch); + bcx.lifetime_end(llscratch, scratch_size); } } else { - if self.layout.ty == ccx.tcx().types.bool { - val = bcx.zext(val, Type::i8(ccx)); - } - bcx.store(val, dst, None); + OperandValue::Immediate(val).store(bcx, dst); } } - pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) { + pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: LvalueRef<'tcx>) { if self.pad.is_some() { *idx += 1; } - if self.is_ignore() { - return; + let mut next = || { + let val = llvm::get_param(bcx.llfn(), *idx as c_uint); + *idx += 1; + val + }; + match self.mode { + PassMode::Ignore => {}, + PassMode::Pair(..) => { + OperandValue::Pair(next(), next()).store(bcx, dst); + } + PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => { + self.store(bcx, next(), dst); + } } - let val = llvm::get_param(bcx.llfn(), *idx as c_uint); - *idx += 1; - self.store(bcx, val, dst); } } @@ -601,7 +632,7 @@ pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueR /// /// I will do my best to describe this structure, but these /// comments are reverse-engineered and may be inaccurate. -NDM -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct FnType<'tcx> { /// The LLVM types of each argument. 
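`store_fn_arg` above shows the bookkeeping the new pass modes require when walking the LLVM parameter list: a padding slot advances the index, `Ignore` consumes nothing, `Pair` consumes two parameters, and every other mode consumes one. A standalone sketch of that accounting (toy types; the real code pulls values with `llvm::get_param` instead of just counting):

    #[derive(Copy, Clone)]
    enum PassMode { Ignore, Direct, Pair, Cast, Indirect }

    struct Arg { mode: PassMode, pad: bool }

    /// How many LLVM-level parameters one Rust-level argument consumes.
    fn llvm_params_consumed(arg: &Arg) -> usize {
        let pad = if arg.pad { 1 } else { 0 };
        pad + match arg.mode {
            PassMode::Ignore => 0,
            PassMode::Pair => 2,
            PassMode::Direct | PassMode::Cast | PassMode::Indirect => 1,
        }
    }

    fn main() {
        let args = [
            Arg { mode: PassMode::Pair, pad: false },   // e.g. a fat pointer
            Arg { mode: PassMode::Ignore, pad: false }, // e.g. a zero-sized type
            Arg { mode: PassMode::Direct, pad: true },  // scalar with a pad slot
        ];
        let total: usize = args.iter().map(llvm_params_consumed).sum();
        assert_eq!(total, 4);
    }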
pub args: Vec>, @@ -620,14 +651,14 @@ pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>) let fn_ty = instance_ty(ccx.tcx(), &instance); let sig = ty_fn_sig(ccx, fn_ty); let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); - Self::new(ccx, sig, &[]) + FnType::new(ccx, sig, &[]) } pub fn new(ccx: &CrateContext<'a, 'tcx>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args); - fn_ty.adjust_for_abi(ccx, sig); + fn_ty.adjust_for_abi(ccx, sig.abi); fn_ty } @@ -636,8 +667,23 @@ pub fn new_vtable(ccx: &CrateContext<'a, 'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx> { let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args); // Don't pass the vtable, it's not an argument of the virtual fn. - fn_ty.args[1].ignore(); - fn_ty.adjust_for_abi(ccx, sig); + { + let self_arg = &mut fn_ty.args[0]; + match self_arg.mode { + PassMode::Pair(data_ptr, _) => { + self_arg.mode = PassMode::Direct(data_ptr); + } + _ => bug!("FnType::new_vtable: non-pair self {:?}", self_arg) + } + + let pointee = self_arg.layout.ty.builtin_deref(true, ty::NoPreference) + .unwrap_or_else(|| { + bug!("FnType::new_vtable: non-pointer self {:?}", self_arg) + }).ty; + let fat_ptr_ty = ccx.tcx().mk_mut_ptr(pointee); + self_arg.layout = ccx.layout_of(fat_ptr_ty).field(ccx, 0); + } + fn_ty.adjust_for_abi(ccx, sig.abi); fn_ty } @@ -702,120 +748,113 @@ pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>, _ => false }; - let arg_of = |ty: Ty<'tcx>, is_return: bool| { - let mut arg = ArgType::new(ccx.layout_of(ty)); - if ty.is_bool() { - arg.attrs.set(ArgAttribute::ZExt); - } else { - if arg.layout.size(ccx).bytes() == 0 { - // For some forsaken reason, x86_64-pc-windows-gnu - // doesn't ignore zero-sized struct arguments. - // The same is true for s390x-unknown-linux-gnu. - if is_return || rust_abi || - (!win_x64_gnu && !linux_s390x) { - arg.ignore(); - } - } + // Handle safe Rust thin and fat pointers. + let adjust_for_rust_scalar = |attrs: &mut ArgAttributes, + scalar: &layout::Scalar, + layout: TyLayout<'tcx>, + offset: Size, + is_return: bool| { + // Booleans are always an i1 that needs to be zero-extended. + if scalar.is_bool() { + attrs.set(ArgAttribute::ZExt); + return; } - arg - }; - - let ret_ty = sig.output(); - let mut ret = arg_of(ret_ty, true); - if !type_is_fat_ptr(ccx, ret_ty) { - // The `noalias` attribute on the return value is useful to a - // function ptr caller. - if ret_ty.is_box() { - // `Box` pointer return values never alias because ownership - // is transferred - ret.attrs.set(ArgAttribute::NoAlias); + // Only pointer types handled below. + if scalar.value != layout::Pointer { + return; } - // We can also mark the return value as `dereferenceable` in certain cases - match ret_ty.sty { - // These are not really pointers but pairs, (pointer, len) - ty::TyRef(_, ty::TypeAndMut { ty, .. }) => { - ret.attrs.set_dereferenceable(ccx.size_of(ty)); - } - ty::TyAdt(def, _) if def.is_box() => { - ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty())); + if scalar.valid_range.start < scalar.valid_range.end { + if scalar.valid_range.start > 0 { + attrs.set(ArgAttribute::NonNull); } - _ => {} } - } - let mut args = Vec::with_capacity(inputs.len() + extra_args.len()); + if let Some(pointee) = layout.pointee_info_at(ccx, offset) { + if let Some(kind) = pointee.safe { + attrs.pointee_size = pointee.size; + attrs.pointee_align = Some(pointee.align); - // Handle safe Rust thin and fat pointers. 
- let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty { - // `Box` pointer parameters never alias because ownership is transferred - ty::TyAdt(def, _) if def.is_box() => { - arg.attrs.set(ArgAttribute::NoAlias); - Some(ty.boxed_ty()) - } + // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions + // with align attributes, and those calls later block optimizations. + if !is_return { + attrs.pointee_align = None; + } - ty::TyRef(_, mt) => { - // `&mut` pointer parameters never alias other parameters, or mutable global data - // - // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as - // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely - // on memory dependencies rather than pointer equality - let is_freeze = ccx.shared().type_is_freeze(mt.ty); - - let no_alias_is_safe = - if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || - ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { - // Mutable refrences or immutable shared references - mt.mutbl == hir::MutMutable || is_freeze - } else { - // Only immutable shared references - mt.mutbl != hir::MutMutable && is_freeze + // `Box` pointer parameters never alias because ownership is transferred + // `&mut` pointer parameters never alias other parameters, + // or mutable global data + // + // `&T` where `T` contains no `UnsafeCell` is immutable, + // and can be marked as both `readonly` and `noalias`, as + // LLVM's definition of `noalias` is based solely on memory + // dependencies rather than pointer equality + let no_alias = match kind { + PointerKind::Shared => false, + PointerKind::Frozen | PointerKind::UniqueOwned => true, + PointerKind::UniqueBorrowed => !is_return }; + if no_alias { + attrs.set(ArgAttribute::NoAlias); + } - if no_alias_is_safe { - arg.attrs.set(ArgAttribute::NoAlias); + if kind == PointerKind::Frozen && !is_return { + attrs.set(ArgAttribute::ReadOnly); + } } + } + }; - if mt.mutbl == hir::MutImmutable && is_freeze { - arg.attrs.set(ArgAttribute::ReadOnly); + let arg_of = |ty: Ty<'tcx>, is_return: bool| { + let mut arg = ArgType::new(ccx.layout_of(ty)); + if arg.layout.is_zst() { + // For some forsaken reason, x86_64-pc-windows-gnu + // doesn't ignore zero-sized struct arguments. + // The same is true for s390x-unknown-linux-gnu. + if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) { + arg.mode = PassMode::Ignore; } - - Some(mt.ty) } - _ => None - }; - for ty in inputs.iter().chain(extra_args.iter()) { - let mut arg = arg_of(ty, false); - - if let ty::layout::FatPointer { .. } = *arg.layout { - let mut data = ArgType::new(arg.layout.field(ccx, 0)); - let mut info = ArgType::new(arg.layout.field(ccx, 1)); - - if let Some(inner) = rust_ptr_attrs(ty, &mut data) { - data.attrs.set(ArgAttribute::NonNull); - if ccx.tcx().struct_tail(inner).is_trait() { - // vtables can be safely marked non-null, readonly - // and noalias. - info.attrs.set(ArgAttribute::NonNull); - info.attrs.set(ArgAttribute::ReadOnly); - info.attrs.set(ArgAttribute::NoAlias); - } + // FIXME(eddyb) other ABIs don't have logic for scalar pairs. 
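The `PointerKind` match above collects the aliasing rules in one table: `&T` of a `Freeze` type is `Frozen` (noalias, and readonly in argument position), `Box` is `UniqueOwned` (noalias), `&mut T` is `UniqueBorrowed` (noalias only in argument position), and anything else gets nothing. A sketch of that decision table; the enum names follow the hunk above, the wrapper function is illustrative:

    #[derive(Copy, Clone, PartialEq)]
    enum PointerKind {
        Shared,         // &T where T may contain UnsafeCell
        Frozen,         // &T where T is Freeze
        UniqueBorrowed, // &mut T
        UniqueOwned,    // Box<T>
    }

    #[derive(Debug, PartialEq)]
    struct PtrAttrs { no_alias: bool, read_only: bool }

    fn pointer_attrs(kind: PointerKind, is_return: bool) -> PtrAttrs {
        let no_alias = match kind {
            PointerKind::Shared => false,
            PointerKind::Frozen | PointerKind::UniqueOwned => true,
            // Per the hunk above, &mut only gets noalias in argument position.
            PointerKind::UniqueBorrowed => !is_return,
        };
        PtrAttrs {
            no_alias,
            read_only: kind == PointerKind::Frozen && !is_return,
        }
    }

    fn main() {
        assert_eq!(pointer_attrs(PointerKind::Frozen, false),
                   PtrAttrs { no_alias: true, read_only: true });
        assert_eq!(pointer_attrs(PointerKind::UniqueBorrowed, true),
                   PtrAttrs { no_alias: false, read_only: false });
    }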
+ if !is_return && rust_abi { + if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi { + let mut a_attrs = ArgAttributes::new(); + let mut b_attrs = ArgAttributes::new(); + adjust_for_rust_scalar(&mut a_attrs, + a, + arg.layout, + Size::from_bytes(0), + false); + adjust_for_rust_scalar(&mut b_attrs, + b, + arg.layout, + a.value.size(ccx).abi_align(b.value.align(ccx)), + false); + arg.mode = PassMode::Pair(a_attrs, b_attrs); + return arg; } - args.push(data); - args.push(info); - } else { - if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { - arg.attrs.set_dereferenceable(ccx.size_of(inner)); + } + + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if let PassMode::Direct(ref mut attrs) = arg.mode { + adjust_for_rust_scalar(attrs, + scalar, + arg.layout, + Size::from_bytes(0), + is_return); } - args.push(arg); } - } + + arg + }; FnType { - args, - ret, + ret: arg_of(sig.output(), true), + args: inputs.iter().chain(extra_args.iter()).map(|ty| { + arg_of(ty, false) + }).collect(), variadic: sig.variadic, cconv, } @@ -823,63 +862,38 @@ pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>, fn adjust_for_abi(&mut self, ccx: &CrateContext<'a, 'tcx>, - sig: ty::FnSig<'tcx>) { - let abi = sig.abi; + abi: Abi) { if abi == Abi::Unadjusted { return } if abi == Abi::Rust || abi == Abi::RustCall || abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { let fixup = |arg: &mut ArgType<'tcx>| { - if !arg.layout.is_aggregate() { - return; - } + if arg.is_ignore() { return; } - let size = arg.layout.size(ccx); - - if let Some(unit) = arg.layout.homogeneous_aggregate(ccx) { - // Replace newtypes with their inner-most type. - if unit.size == size { - // Needs a cast as we've unpacked a newtype. - arg.cast_to(ccx, unit); - return; - } - - // Pairs of floats. - if unit.kind == RegKind::Float { - if unit.size.checked_mul(2, ccx) == Some(size) { - // FIXME(eddyb) This should be using Uniform instead of a pair, - // but the resulting [2 x float/double] breaks emscripten. - // See https://github.com/kripken/emscripten-fastcomp/issues/178. - arg.cast_to(ccx, CastTarget::Pair(unit, unit)); - return; - } - } + match arg.layout.abi { + layout::Abi::Aggregate { .. } => {} + _ => return } + let size = arg.layout.size; if size > layout::Pointer.size(ccx) { - arg.make_indirect(ccx); + arg.make_indirect(); } else { // We want to pass small aggregates as immediates, but using // a LLVM aggregate type for this leads to bad optimizations, // so we pick an appropriately sized integer type instead. - arg.cast_to(ccx, Reg { + arg.cast_to(Reg { kind: RegKind::Integer, size }); } }; - // Fat pointers are returned by-value. 
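The Rust-ABI `fixup` above is the core of this hunk: aggregates that fit in a pointer-sized slot are passed as a single integer register (LLVM aggregate types optimize poorly), and anything larger goes behind a hidden pointer. A compact sketch of the rule; the real code additionally leaves non-aggregate layouts and ignored arguments untouched:

    #[derive(Debug, PartialEq)]
    enum PassMode {
        Direct,                  // scalars and pairs are left alone
        CastToInt { bits: u64 }, // pass as an iN of the aggregate's size
        Indirect,                // pass by hidden reference
    }

    fn rust_abi_fixup(is_aggregate: bool, size_bytes: u64, ptr_bytes: u64) -> PassMode {
        if !is_aggregate {
            return PassMode::Direct;
        }
        if size_bytes > ptr_bytes {
            PassMode::Indirect
        } else {
            PassMode::CastToInt { bits: size_bytes * 8 }
        }
    }

    fn main() {
        // On a 64-bit target: a 3-byte struct travels as an i24,
        // a 24-byte struct travels behind a pointer.
        assert_eq!(rust_abi_fixup(true, 3, 8), PassMode::CastToInt { bits: 24 });
        assert_eq!(rust_abi_fixup(true, 24, 8), PassMode::Indirect);
    }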
- if !self.ret.is_ignore() { - if !type_is_fat_ptr(ccx, sig.output()) { - fixup(&mut self.ret); - } - } + fixup(&mut self.ret); for arg in &mut self.args { - if arg.is_ignore() { continue; } fixup(arg); } - if self.ret.is_indirect() { - self.ret.attrs.set(ArgAttribute::StructRet); + if let PassMode::Indirect(ref mut attrs) = self.ret.mode { + attrs.set(ArgAttribute::StructRet); } return; } @@ -896,7 +910,7 @@ fn adjust_for_abi(&mut self, "x86_64" => if abi == Abi::SysV64 { cabi_x86_64::compute_abi_info(ccx, self); } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows { - cabi_x86_win64::compute_abi_info(ccx, self); + cabi_x86_win64::compute_abi_info(self); } else { cabi_x86_64::compute_abi_info(ccx, self); }, @@ -909,51 +923,52 @@ fn adjust_for_abi(&mut self, "s390x" => cabi_s390x::compute_abi_info(ccx, self), "asmjs" => cabi_asmjs::compute_abi_info(ccx, self), "wasm32" => cabi_asmjs::compute_abi_info(ccx, self), - "msp430" => cabi_msp430::compute_abi_info(ccx, self), + "msp430" => cabi_msp430::compute_abi_info(self), "sparc" => cabi_sparc::compute_abi_info(ccx, self), "sparc64" => cabi_sparc64::compute_abi_info(ccx, self), - "nvptx" => cabi_nvptx::compute_abi_info(ccx, self), - "nvptx64" => cabi_nvptx64::compute_abi_info(ccx, self), - "hexagon" => cabi_hexagon::compute_abi_info(ccx, self), + "nvptx" => cabi_nvptx::compute_abi_info(self), + "nvptx64" => cabi_nvptx64::compute_abi_info(self), + "hexagon" => cabi_hexagon::compute_abi_info(self), a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)) } - if self.ret.is_indirect() { - self.ret.attrs.set(ArgAttribute::StructRet); + if let PassMode::Indirect(ref mut attrs) = self.ret.mode { + attrs.set(ArgAttribute::StructRet); } } pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { let mut llargument_tys = Vec::new(); - let llreturn_ty = if self.ret.is_ignore() { - Type::void(ccx) - } else if self.ret.is_indirect() { - llargument_tys.push(self.ret.memory_ty(ccx).ptr_to()); - Type::void(ccx) - } else { - self.ret.cast.unwrap_or_else(|| { - type_of::immediate_type_of(ccx, self.ret.layout.ty) - }) + let llreturn_ty = match self.ret.mode { + PassMode::Ignore => Type::void(ccx), + PassMode::Direct(_) | PassMode::Pair(..) => { + self.ret.layout.immediate_llvm_type(ccx) + } + PassMode::Cast(cast) => cast.llvm_type(ccx), + PassMode::Indirect(_) => { + llargument_tys.push(self.ret.memory_ty(ccx).ptr_to()); + Type::void(ccx) + } }; for arg in &self.args { - if arg.is_ignore() { - continue; - } // add padding if let Some(ty) = arg.pad { - llargument_tys.push(ty); + llargument_tys.push(ty.llvm_type(ccx)); } - let llarg_ty = if arg.is_indirect() { - arg.memory_ty(ccx).ptr_to() - } else { - arg.cast.unwrap_or_else(|| { - type_of::immediate_type_of(ccx, arg.layout.ty) - }) + let llarg_ty = match arg.mode { + PassMode::Ignore => continue, + PassMode::Direct(_) => arg.layout.immediate_llvm_type(ccx), + PassMode::Pair(..) 
=> { + llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 0)); + llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 1)); + continue; + } + PassMode::Cast(cast) => cast.llvm_type(ccx), + PassMode::Indirect(_) => arg.memory_ty(ccx).ptr_to(), }; - llargument_tys.push(llarg_ty); } @@ -965,31 +980,61 @@ pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { } pub fn apply_attrs_llfn(&self, llfn: ValueRef) { - let mut i = if self.ret.is_indirect() { 1 } else { 0 }; - if !self.ret.is_ignore() { - self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); + let mut i = 0; + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); + i += 1; + }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn); + } + PassMode::Indirect(ref attrs) => apply(attrs), + _ => {} } - i += 1; for arg in &self.args { - if !arg.is_ignore() { - if arg.pad.is_some() { i += 1; } - arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn); - i += 1; + if arg.pad.is_some() { + apply(&ArgAttributes::new()); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | + PassMode::Indirect(ref attrs) => apply(attrs), + PassMode::Pair(ref a, ref b) => { + apply(a); + apply(b); + } + PassMode::Cast(_) => apply(&ArgAttributes::new()), } } } pub fn apply_attrs_callsite(&self, callsite: ValueRef) { - let mut i = if self.ret.is_indirect() { 1 } else { 0 }; - if !self.ret.is_ignore() { - self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); + let mut i = 0; + let mut apply = |attrs: &ArgAttributes| { + attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); + i += 1; + }; + match self.ret.mode { + PassMode::Direct(ref attrs) => { + attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite); + } + PassMode::Indirect(ref attrs) => apply(attrs), + _ => {} } - i += 1; for arg in &self.args { - if !arg.is_ignore() { - if arg.pad.is_some() { i += 1; } - arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); - i += 1; + if arg.pad.is_some() { + apply(&ArgAttributes::new()); + } + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(ref attrs) | + PassMode::Indirect(ref attrs) => apply(attrs), + PassMode::Pair(ref a, ref b) => { + apply(a); + apply(b); + } + PassMode::Cast(_) => apply(&ArgAttributes::new()), } } @@ -998,7 +1043,3 @@ pub fn apply_attrs_callsite(&self, callsite: ValueRef) { } } } - -pub fn align_up_to(off: u64, a: u64) -> u64 { - (off + a - 1) / a * a -} diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs deleted file mode 100644 index b06f8e4e671162bc007e4a20392c3d9f43ddb601..0000000000000000000000000000000000000000 --- a/src/librustc_trans/adt.rs +++ /dev/null @@ -1,497 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! # Representation of Algebraic Data Types -//! -//! This module determines how to represent enums, structs, and tuples -//! based on their monomorphized types; it is responsible both for -//! choosing a representation and translating basic operations on -//! values of those types. (Note: exporting the representations for -//! 
debuggers is handled in debuginfo.rs, not here.) -//! -//! Note that the interface treats everything as a general case of an -//! enum, so structs/tuples/etc. have one pseudo-variant with -//! discriminant 0; i.e., as if they were a univariant enum. -//! -//! Having everything in one place will enable improvements to data -//! structure representation; possibilities include: -//! -//! - User-specified alignment (e.g., cacheline-aligning parts of -//! concurrently accessed data structures); LLVM can't represent this -//! directly, so we'd have to insert padding fields in any structure -//! that might contain one and adjust GEP indices accordingly. See -//! issue #4578. -//! -//! - Store nested enums' discriminants in the same word. Rather, if -//! some variants start with enums, and those enums representations -//! have unused alignment padding between discriminant and body, the -//! outer enum's discriminant can be stored there and those variants -//! can start at offset 0. Kind of fancy, and might need work to -//! make copies of the inner enum type cooperate, but it could help -//! with `Option` or `Result` wrapped around another enum. -//! -//! - Tagged pointers would be neat, but given that any type can be -//! used unboxed and any field can have pointers (including mutable) -//! taken to it, implementing them for Rust seems difficult. - -use std; - -use llvm::{ValueRef, True, IntEQ, IntNE}; -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, LayoutTyper}; -use common::*; -use builder::Builder; -use base; -use machine; -use monomorphize; -use type_::Type; -use type_of; - -use mir::lvalue::Alignment; - -/// Given an enum, struct, closure, or tuple, extracts fields. -/// Treats closures as a struct with one variant. -/// `empty_if_no_variants` is a switch to deal with empty enums. -/// If true, `variant_index` is disregarded and an empty Vec returned in this case. -pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, - variant_index: usize, - empty_if_no_variants: bool) -> Vec> { - match t.sty { - ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => { - Vec::default() - }, - ty::TyAdt(ref def, ref substs) => { - def.variants[variant_index].fields.iter().map(|f| { - monomorphize::field_ty(cx.tcx(), substs, f) - }).collect::>() - }, - ty::TyTuple(fields, _) => fields.to_vec(), - ty::TyClosure(def_id, substs) => { - if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);} - substs.upvar_tys(def_id, cx.tcx()).collect() - }, - ty::TyGenerator(def_id, substs, _) => { - if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);} - substs.field_tys(def_id, cx.tcx()).map(|t| { - cx.tcx().fully_normalize_associated_types_in(&t) - }).collect() - }, - _ => bug!("{} is not a type that can have fields.", t) - } -} - -/// LLVM-level types are a little complicated. -/// -/// C-like enums need to be actual ints, not wrapped in a struct, -/// because that changes the ABI on some platforms (see issue #10308). -/// -/// For nominal types, in some cases, we need to use LLVM named structs -/// and fill in the actual contents in a second pass to prevent -/// unbounded recursion; see also the comments in `trans::type_of`. 
-pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - generic_type_of(cx, t, None) -} - -pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, name: &str) -> Type { - generic_type_of(cx, t, Some(name)) -} - -pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, llty: &mut Type) { - let l = cx.layout_of(t); - debug!("finish_type_of: {} with layout {:#?}", t, l); - match *l { - layout::CEnum { .. } | layout::General { .. } - | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { } - layout::Univariant { ..} - | layout::StructWrappedNullablePointer { .. } => { - let (nonnull_variant_index, nonnull_variant, packed) = match *l { - layout::Univariant { ref variant, .. } => (0, variant, variant.packed), - layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => - (nndiscr, nonnull, nonnull.packed), - _ => unreachable!() - }; - let fields = compute_fields(cx, t, nonnull_variant_index as usize, true); - llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant), - packed) - }, - _ => bug!("This function cannot handle {} with layout {:#?}", t, l) - } -} - -fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - t: Ty<'tcx>, - name: Option<&str>) -> Type { - let l = cx.layout_of(t); - debug!("adt::generic_type_of t: {:?} name: {:?}", t, name); - match *l { - layout::CEnum { discr, .. } => Type::from_integer(cx, discr), - layout::RawNullablePointer { nndiscr, .. } => { - let (def, substs) = match t.sty { - ty::TyAdt(d, s) => (d, s), - _ => bug!("{} is not an ADT", t) - }; - let nnty = monomorphize::field_ty(cx.tcx(), substs, - &def.variants[nndiscr as usize].fields[0]); - if let layout::Scalar { value: layout::Pointer, .. } = *cx.layout_of(nnty) { - Type::i8p(cx) - } else { - type_of::type_of(cx, nnty) - } - } - layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => { - let fields = compute_fields(cx, t, nndiscr as usize, false); - match name { - None => { - Type::struct_(cx, &struct_llfields(cx, &fields, nonnull), - nonnull.packed) - } - Some(name) => { - Type::named_struct(cx, name) - } - } - } - layout::Univariant { ref variant, .. } => { - // Note that this case also handles empty enums. - // Thus the true as the final parameter here. - let fields = compute_fields(cx, t, 0, true); - match name { - None => { - let fields = struct_llfields(cx, &fields, &variant); - Type::struct_(cx, &fields, variant.packed) - } - Some(name) => { - // Hypothesis: named_struct's can never need a - // drop flag. (... needs validation.) - Type::named_struct(cx, name) - } - } - } - layout::UntaggedUnion { ref variants, .. }=> { - // Use alignment-sized ints to fill all the union storage. - let size = variants.stride().bytes(); - let align = variants.align.abi(); - let fill = union_fill(cx, size, align); - match name { - None => { - Type::struct_(cx, &[fill], variants.packed) - } - Some(name) => { - let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], variants.packed); - llty - } - } - } - layout::General { discr, size, align, primitive_align, .. } => { - // We need a representation that has: - // * The alignment of the most-aligned field - // * The size of the largest variant (rounded up to that alignment) - // * No alignment padding anywhere any variant has actual data - // (currently matters only for enums small enough to be immediate) - // * The discriminant in an obvious place. 
- // - // So we start with the discriminant, pad it up to the alignment with - // more of its own type, then use alignment-sized ints to get the rest - // of the size. - let size = size.bytes(); - let align = align.abi(); - let primitive_align = primitive_align.abi(); - assert!(align <= std::u32::MAX as u64); - let discr_ty = Type::from_integer(cx, discr); - let discr_size = discr.size().bytes(); - let padded_discr_size = roundup(discr_size, align as u32); - let variant_part_size = size-padded_discr_size; - let variant_fill = union_fill(cx, variant_part_size, primitive_align); - - assert_eq!(machine::llalign_of_min(cx, variant_fill), primitive_align as u32); - assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly - let fields: Vec = - [discr_ty, - Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size), - variant_fill].iter().cloned().collect(); - match name { - None => { - Type::struct_(cx, &fields, false) - } - Some(name) => { - let mut llty = Type::named_struct(cx, name); - llty.set_struct_body(&fields, false); - llty - } - } - } - _ => bug!("Unsupported type {} represented as {:#?}", t, l) - } -} - -fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type { - assert_eq!(size%align, 0); - assert_eq!(align.count_ones(), 1, "Alignment must be a power fof 2. Got {}", align); - let align_units = size/align; - let layout_align = layout::Align::from_bytes(align, align).unwrap(); - if let Some(ity) = layout::Integer::for_abi_align(cx, layout_align) { - Type::array(&Type::from_integer(cx, ity), align_units) - } else { - Type::array(&Type::vector(&Type::i32(cx), align/4), - align_units) - } -} - - -// Double index to account for padding (FieldPath already uses `Struct::memory_index`) -fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec { - discrfield.iter().map(|&i| (i as usize) << 1).collect::>() -} - - -// Lookup `Struct::memory_index` and double it to account for padding -pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize { - (variant.memory_index[index] as usize) << 1 -} - - -pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec>, - variant: &layout::Struct) -> Vec { - debug!("struct_llfields: variant: {:?}", variant); - let mut first_field = true; - let mut min_offset = 0; - let mut result: Vec = Vec::with_capacity(field_tys.len() * 2); - let field_iter = variant.field_index_by_increasing_offset().map(|i| { - (i, field_tys[i as usize], variant.offsets[i as usize].bytes()) }); - for (index, ty, target_offset) in field_iter { - if first_field { - debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}", - index, ty, min_offset, target_offset); - first_field = false; - } else { - assert!(target_offset >= min_offset); - let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset }; - result.push(Type::array(&Type::i8(cx), padding_bytes)); - debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}", - index, ty, padding_bytes, min_offset, target_offset); - } - let llty = type_of::in_memory_type_of(cx, ty); - result.push(llty); - let layout = cx.layout_of(ty); - let target_size = layout.size(&cx.tcx().data_layout).bytes(); - min_offset = target_offset + target_size; - } - if variant.sized && !field_tys.is_empty() { - if variant.stride().bytes() < min_offset { - bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(), - min_offset); - } - let padding_bytes = variant.stride().bytes() - min_offset; - 
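The removed `struct_llfields` shows how the old code materialized padding explicitly: fields are visited in increasing-offset order, and an `[N x i8]` filler is emitted wherever the next field's target offset lies past the end of the previous one (which is why `struct_llfields_index` doubles every index). A sketch of just the padding computation, in plain Rust without LLVM types:

    /// For fields already sorted by offset, return (padding_before, field_size)
    /// pairs; `stride` is the struct's total size including tail padding.
    fn paddings(fields: &[(u64, u64)], stride: u64) -> Vec<(u64, u64)> {
        let mut out = Vec::with_capacity(fields.len() + 1);
        let mut min_offset = 0;
        for &(offset, size) in fields {
            assert!(offset >= min_offset, "fields must not overlap");
            out.push((offset - min_offset, size));
            min_offset = offset + size;
        }
        // Tail padding up to the stride (the last push in the removed code).
        assert!(stride >= min_offset);
        out.push((stride - min_offset, 0));
        out
    }

    fn main() {
        // A struct { a: u8, b: u32 } laid out as a @0, b @4, stride 8:
        // 3 bytes of padding before `b`, no tail padding.
        let p = paddings(&[(0, 1), (4, 4)], 8);
        assert_eq!(p, vec![(0, 1), (3, 4), (0, 0)]);
    }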
debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n", - padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes()); - result.push(Type::array(&Type::i8(cx), padding_bytes)); - assert!(result.len() == (field_tys.len() * 2)); - } else { - debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n", - min_offset, variant.min_size.bytes(), variant.stride().bytes()); - } - - result -} - -pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool { - match *l { - layout::CEnum { signed, .. }=> signed, - _ => false, - } -} - -/// Obtain the actual discriminant of a value. -pub fn trans_get_discr<'a, 'tcx>( - bcx: &Builder<'a, 'tcx>, - t: Ty<'tcx>, - scrutinee: ValueRef, - alignment: Alignment, - cast_to: Option, - range_assert: bool -) -> ValueRef { - debug!("trans_get_discr t: {:?}", t); - let l = bcx.ccx.layout_of(t); - - let val = match *l { - layout::CEnum { discr, min, max, .. } => { - load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert) - } - layout::General { discr, ref variants, .. } => { - let ptr = bcx.struct_gep(scrutinee, 0); - load_discr(bcx, discr, ptr, alignment, - 0, variants.len() as u64 - 1, - range_assert) - } - layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0), - layout::RawNullablePointer { nndiscr, .. } => { - let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - let discr = bcx.load(scrutinee, alignment.to_align()); - bcx.icmp(cmp, discr, C_null(val_ty(discr))) - } - layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => { - struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment) - }, - _ => bug!("{} is not an enum", t) - }; - match cast_to { - None => val, - Some(llty) => bcx.intcast(val, llty, is_discr_signed(&l)) - } -} - -fn struct_wrapped_nullable_bitdiscr( - bcx: &Builder, - nndiscr: u64, - discrfield: &layout::FieldPath, - scrutinee: ValueRef, - alignment: Alignment, -) -> ValueRef { - let path = struct_llfields_path(discrfield); - let llptrptr = bcx.gepi(scrutinee, &path); - let llptr = bcx.load(llptrptr, alignment.to_align()); - let cmp = if nndiscr == 0 { IntEQ } else { IntNE }; - bcx.icmp(cmp, llptr, C_null(val_ty(llptr))) -} - -/// Helper for cases where the discriminant is simply loaded. -fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, - alignment: Alignment, min: u64, max: u64, - range_assert: bool) - -> ValueRef { - let llty = Type::from_integer(bcx.ccx, ity); - assert_eq!(val_ty(ptr), llty.ptr_to()); - let bits = ity.size().bits(); - assert!(bits <= 64); - let bits = bits as usize; - let mask = !0u64 >> (64 - bits); - // For a (max) discr of -1, max will be `-1 as usize`, which overflows. - // However, that is fine here (it would still represent the full range), - if max.wrapping_add(1) & mask == min & mask || !range_assert { - // i.e., if the range is everything. The lo==hi case would be - // rejected by the LLVM verifier (it would mean either an - // empty set, which is impossible, or the entire range of the - // type, which is pointless). - bcx.load(ptr, alignment.to_align()) - } else { - // llvm::ConstantRange can deal with ranges that wrap around, - // so an overflow on (max + 1) is fine. - bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True, - alignment.to_align()) - } -} - -/// Set the discriminant for a new value of the given case of the given -/// representation. 
-pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: u64) { - let l = bcx.ccx.layout_of(t); - match *l { - layout::CEnum{ discr, min, max, .. } => { - assert_discr_in_range(min, max, to); - bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), - val, None); - } - layout::General{ discr, .. } => { - bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64), - bcx.struct_gep(val, 0), None); - } - layout::Univariant { .. } - | layout::UntaggedUnion { .. } - | layout::Vector { .. } => { - assert_eq!(to, 0); - } - layout::RawNullablePointer { nndiscr, .. } => { - if to != nndiscr { - let llptrty = val_ty(val).element_type(); - bcx.store(C_null(llptrty), val, None); - } - } - layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => { - if to != nndiscr { - if target_sets_discr_via_memset(bcx) { - // Issue #34427: As workaround for LLVM bug on - // ARM, use memset of 0 on whole struct rather - // than storing null to single target field. - let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to()); - let fill_byte = C_u8(bcx.ccx, 0); - let size = C_usize(bcx.ccx, nonnull.stride().bytes()); - let align = C_i32(bcx.ccx, nonnull.align.abi() as i32); - base::call_memset(bcx, llptr, fill_byte, size, align, false); - } else { - let path = struct_llfields_path(discrfield); - let llptrptr = bcx.gepi(val, &path); - let llptrty = val_ty(llptrptr).element_type(); - bcx.store(C_null(llptrty), llptrptr, None); - } - } - } - _ => bug!("Cannot handle {} represented as {:#?}", t, l) - } -} - -fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool { - bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64" -} - -pub fn assert_discr_in_range(min: D, max: D, discr: D) { - if min <= max { - assert!(min <= discr && discr <= max) - } else { - assert!(min <= discr || discr <= max) - } -} - -// FIXME this utility routine should be somewhere more general -#[inline] -fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } - -/// Extract a field of a constant value, as appropriate for its -/// representation. -/// -/// (Not to be confused with `common::const_get_elt`, which operates on -/// raw LLVM-level structs and arrays.) -pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, - val: ValueRef, - ix: usize) -> ValueRef { - let l = ccx.layout_of(t); - match *l { - layout::CEnum { .. } => bug!("element access in C-like enum const"), - layout::Univariant { ref variant, .. } => { - const_struct_field(val, variant.memory_index[ix] as usize) - } - layout::Vector { .. } => const_struct_field(val, ix), - layout::UntaggedUnion { .. } => const_struct_field(val, 0), - _ => bug!("{} does not have fields.", t) - } -} - -/// Extract field of struct-like const, skipping our alignment padding. -fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef { - // Get the ix-th non-undef element of the struct. 
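`assert_discr_in_range` above has to accept both ordinary ranges and ranges that wrap around the discriminant's type (how a signed range like -1..=1 looks once everything is an unsigned word), and `roundup` is the usual align-up helper. Both are small enough to restate standalone:

    /// Inclusive range check that also handles wrapped ranges (min > max).
    fn discr_in_range(min: u64, max: u64, discr: u64) -> bool {
        if min <= max {
            min <= discr && discr <= max
        } else {
            // Wrapped: valid values are min..=TYPE_MAX and 0..=max.
            min <= discr || discr <= max
        }
    }

    /// Round `x` up to the next multiple of `a`.
    fn roundup(x: u64, a: u64) -> u64 {
        ((x + (a - 1)) / a) * a
    }

    fn main() {
        assert!(discr_in_range(0, 3, 2));
        assert!(discr_in_range(250, 5, 253)); // wrapped range 250..=255, 0..=5
        assert!(!discr_in_range(250, 5, 100));
        assert_eq!(roundup(10, 8), 16);
        assert_eq!(roundup(16, 8), 16);
    }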
- let mut real_ix = 0; // actual position in the struct - let mut ix = ix; // logical index relative to real_ix - let mut field; - loop { - loop { - field = const_get_elt(val, &[real_ix]); - if !is_undef(field) { - break; - } - real_ix = real_ix + 1; - } - if ix == 0 { - return field; - } - ix = ix - 1; - real_ix = real_ix + 1; - } -} diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs index 92cbd004206e7c53fc066821a34cc6418f5b5c58..1959fd13ccb294d0fcbda5a667cd0cdcd084bceb 100644 --- a/src/librustc_trans/asm.rs +++ b/src/librustc_trans/asm.rs @@ -11,16 +11,15 @@ //! # Translation of inline assembly. use llvm::{self, ValueRef}; -use base; use common::*; -use type_of; use type_::Type; +use type_of::LayoutLlvmExt; use builder::Builder; use rustc::hir; -use rustc::ty::Ty; -use mir::lvalue::Alignment; +use mir::lvalue::LvalueRef; +use mir::operand::OperandValue; use std::ffi::CString; use syntax::ast::AsmDialect; @@ -30,7 +29,7 @@ pub fn trans_inline_asm<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, ia: &hir::InlineAsm, - outputs: Vec<(ValueRef, Ty<'tcx>)>, + outputs: Vec>, mut inputs: Vec ) { let mut ext_constraints = vec![]; @@ -38,20 +37,15 @@ pub fn trans_inline_asm<'a, 'tcx>( // Prepare the output operands let mut indirect_outputs = vec![]; - for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() { - let val = if out.is_rw || out.is_indirect { - Some(base::load_ty(bcx, val, Alignment::Packed, ty)) - } else { - None - }; + for (i, (out, lvalue)) in ia.outputs.iter().zip(&outputs).enumerate() { if out.is_rw { - inputs.push(val.unwrap()); + inputs.push(lvalue.load(bcx).immediate()); ext_constraints.push(i.to_string()); } if out.is_indirect { - indirect_outputs.push(val.unwrap()); + indirect_outputs.push(lvalue.load(bcx).immediate()); } else { - output_types.push(type_of::type_of(bcx.ccx, ty)); + output_types.push(lvalue.layout.llvm_type(bcx.ccx)); } } if !indirect_outputs.is_empty() { @@ -106,9 +100,9 @@ pub fn trans_inline_asm<'a, 'tcx>( // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); - for (i, (_, &(val, _))) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) }; - bcx.store(v, val, None); + for (i, (_, &lvalue)) in outputs.enumerate() { + let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) }; + OperandValue::Immediate(v).store(bcx, lvalue); } // Store mark in a metadata node so we can map LLVM errors diff --git a/src/librustc_trans/attributes.rs b/src/librustc_trans/attributes.rs index b6ca1460a7d0ac9df44378709e3783aaa35f63d8..745aa0da82900d1681a7432a9afd6745c4ce5c0b 100644 --- a/src/librustc_trans/attributes.rs +++ b/src/librustc_trans/attributes.rs @@ -116,7 +116,7 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe naked(llfn, true); } else if attr.check_name("allocator") { Attribute::NoAlias.apply_llfn( - llvm::AttributePlace::ReturnValue(), llfn); + llvm::AttributePlace::ReturnValue, llfn); } else if attr.check_name("unwind") { unwind(llfn, true); } else if attr.check_name("rustc_allocator_nounwind") { diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 69bcd0aa50b99a3db724f71a4b3d8f586450164c..b7408681ed0c8a214e24237f94953dc28a4cd499 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -28,6 +28,7 @@ use super::ModuleTranslation; use super::ModuleKind; +use abi; use assert_module_sources; use back::link; use back::symbol_export; @@ -40,6 +41,7 @@ 
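The removed `const_struct_field` deals with the old padded constant layout: every logical field is interleaved with `undef` padding slots, so finding the `ix`-th real field means walking the raw indices and skipping the undef ones. A sketch of the same two-counter walk over a plain slice, with `None` playing the role of an undef padding slot:

    /// Return the `ix`-th non-padding element, skipping `None` (padding) slots.
    fn const_struct_field<T: Copy>(elements: &[Option<T>], ix: usize) -> T {
        let mut remaining = ix; // logical index still to consume
        let mut real_ix = 0;    // actual position in the padded struct
        loop {
            match elements[real_ix] {
                None => real_ix += 1, // skip padding
                Some(value) => {
                    if remaining == 0 {
                        return value;
                    }
                    remaining -= 1;
                    real_ix += 1;
                }
            }
        }
    }

    fn main() {
        // field0, pad, pad, field1, pad, field2
        let padded = [Some(10), None, None, Some(20), None, Some(30)];
        assert_eq!(const_struct_field(&padded, 0), 10);
        assert_eq!(const_struct_field(&padded, 2), 30);
    }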
use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; use rustc::ty::maps::Providers; use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; @@ -47,7 +49,6 @@ use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; use rustc_incremental; -use abi; use allocator; use mir::lvalue::LvalueRef; use attributes; @@ -55,25 +56,20 @@ use callee; use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; use collector::{self, TransItemCollectionMode}; -use common::{C_struct_in_context, C_u64, C_undef, C_array}; -use common::CrateContext; -use common::{type_is_zero_size, val_ty}; -use common; +use common::{self, C_struct_in_context, C_array, CrateContext, val_ty}; use consts; use context::{self, LocalCrateContext, SharedCrateContext}; use debuginfo; use declare; -use machine; use meth; use mir; -use monomorphize::{self, Instance}; +use monomorphize::Instance; use partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; use symbol_names_test; use time_graph; use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames}; use type_::Type; -use type_of; -use value::Value; +use type_of::LayoutLlvmExt; use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet}; use CrateInfo; @@ -90,7 +86,7 @@ use rustc::hir; use syntax::ast; -use mir::lvalue::Alignment; +use mir::operand::OperandValue; pub use rustc_trans_utils::{find_exported_symbols, check_for_rustc_errors_attr}; pub use rustc_trans_utils::trans_item::linkage_by_name; @@ -125,14 +121,6 @@ fn drop(&mut self) { } } -pub fn get_meta(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { - bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA) -} - -pub fn get_dataptr(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef { - bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR) -} - pub fn bin_op_to_icmp_predicate(op: hir::BinOp_, signed: bool) -> llvm::IntPredicate { @@ -216,8 +204,10 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, old_info.expect("unsized_info: missing old info for trait upcast") } (_, &ty::TyDynamic(ref data, ..)) => { + let vtable_ptr = ccx.layout_of(ccx.tcx().mk_mut_ptr(target)) + .field(ccx, abi::FAT_PTR_EXTRA); consts::ptrcast(meth::get_vtable(ccx, source, data.principal()), - Type::vtable_ptr(ccx)) + vtable_ptr.llvm_type(ccx)) } _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, @@ -241,15 +231,40 @@ pub fn unsize_thin_ptr<'a, 'tcx>( (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. 
})) => { assert!(bcx.ccx.shared().type_is_sized(a)); - let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to(); + let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => { let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); assert!(bcx.ccx.shared().type_is_sized(a)); - let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to(); + let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to(); (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None)) } + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { + assert_eq!(def_a, def_b); + + let src_layout = bcx.ccx.layout_of(src_ty); + let dst_layout = bcx.ccx.layout_of(dst_ty); + let mut result = None; + for i in 0..src_layout.fields.count() { + let src_f = src_layout.field(bcx.ccx, i); + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + if src_f.is_zst() { + continue; + } + assert_eq!(src_layout.size, src_f.size); + + let dst_f = dst_layout.field(bcx.ccx, i); + assert_ne!(src_f.ty, dst_f.ty); + assert_eq!(result, None); + result = Some(unsize_thin_ptr(bcx, src, src_f.ty, dst_f.ty)); + } + let (lldata, llextra) = result.unwrap(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + (bcx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 0)), + bcx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 1))) + } _ => bug!("unsize_thin_ptr: called on bad types"), } } @@ -257,25 +272,26 @@ pub fn unsize_thin_ptr<'a, 'tcx>( /// Coerce `src`, which is a reference to a value of type `src_ty`, /// to a value of type `dst_ty` and store the result in `dst` pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, - src: &LvalueRef<'tcx>, - dst: &LvalueRef<'tcx>) { - let src_ty = src.ty.to_ty(bcx.tcx()); - let dst_ty = dst.ty.to_ty(bcx.tcx()); + src: LvalueRef<'tcx>, + dst: LvalueRef<'tcx>) { + let src_ty = src.layout.ty; + let dst_ty = dst.layout.ty; let coerce_ptr = || { - let (base, info) = if common::type_is_fat_ptr(bcx.ccx, src_ty) { - // fat-ptr to fat-ptr unsize preserves the vtable - // i.e. &'a fmt::Debug+Send => &'a fmt::Debug - // So we need to pointercast the base to ensure - // the types match up. - let (base, info) = load_fat_ptr(bcx, src.llval, src.alignment, src_ty); - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty); - let base = bcx.pointercast(base, llcast_ty); - (base, info) - } else { - let base = load_ty(bcx, src.llval, src.alignment, src_ty); - unsize_thin_ptr(bcx, base, src_ty, dst_ty) + let (base, info) = match src.load(bcx).val { + OperandValue::Pair(base, info) => { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. &'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. + let thin_ptr = dst.layout.field(bcx.ccx, abi::FAT_PTR_ADDR); + (bcx.pointercast(base, thin_ptr.llvm_type(bcx.ccx)), info) + } + OperandValue::Immediate(base) => { + unsize_thin_ptr(bcx, base, src_ty, dst_ty) + } + OperandValue::Ref(..) 
=> bug!() }; - store_fat_ptr(bcx, base, info, dst.llval, dst.alignment, dst_ty); + OperandValue::Pair(base, info).store(bcx, dst); }; match (&src_ty.sty, &dst_ty.sty) { (&ty::TyRef(..), &ty::TyRef(..)) | @@ -287,32 +303,22 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, coerce_ptr() } - (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => { + (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => { assert_eq!(def_a, def_b); - let src_fields = def_a.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.tcx(), substs_a, f) - }); - let dst_fields = def_b.variants[0].fields.iter().map(|f| { - monomorphize::field_ty(bcx.tcx(), substs_b, f) - }); + for i in 0..def_a.variants[0].fields.len() { + let src_f = src.project_field(bcx, i); + let dst_f = dst.project_field(bcx, i); - let iter = src_fields.zip(dst_fields).enumerate(); - for (i, (src_fty, dst_fty)) in iter { - if type_is_zero_size(bcx.ccx, dst_fty) { + if dst_f.layout.is_zst() { continue; } - let (src_f, src_f_align) = src.trans_field_ptr(bcx, i); - let (dst_f, dst_f_align) = dst.trans_field_ptr(bcx, i); - if src_fty == dst_fty { - memcpy_ty(bcx, dst_f, src_f, src_fty, None); + if src_f.layout.ty == dst_f.layout.ty { + memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout, + (src_f.alignment | dst_f.alignment).non_abi()); } else { - coerce_unsized_into( - bcx, - &LvalueRef::new_sized_ty(src_f, src_fty, src_f_align), - &LvalueRef::new_sized_ty(dst_f, dst_fty, dst_f_align) - ); + coerce_unsized_into(bcx, src_f, dst_f); } } } @@ -385,94 +391,6 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) { b.call(assume_intrinsic, &[val], None); } -/// Helper for loading values from memory. Does the necessary conversion if the in-memory type -/// differs from the type used for SSA values. Also handles various special cases where the type -/// gives us better information about what we are loading. -pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, - alignment: Alignment, t: Ty<'tcx>) -> ValueRef { - let ccx = b.ccx; - if type_is_zero_size(ccx, t) { - return C_undef(type_of::type_of(ccx, t)); - } - - unsafe { - let global = llvm::LLVMIsAGlobalVariable(ptr); - if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True { - let val = llvm::LLVMGetInitializer(global); - if !val.is_null() { - if t.is_bool() { - return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref()); - } - return val; - } - } - } - - if t.is_bool() { - b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False, alignment.to_align()), - Type::i1(ccx)) - } else if t.is_char() { - // a char is a Unicode codepoint, and so takes values from 0 - // to 0x10FFFF inclusive only. - b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align()) - } else if (t.is_region_ptr() || t.is_box() || t.is_fn()) - && !common::type_is_fat_ptr(ccx, t) - { - b.load_nonnull(ptr, alignment.to_align()) - } else { - b.load(ptr, alignment.to_align()) - } -} - -/// Helper for storing values in memory. Does the necessary conversion if the in-memory type -/// differs from the type used for SSA values. 
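Note: the rewritten `TyAdt` arm of `unsize_thin_ptr` above relies on an invariant of unsized-coercion sources: once zero-sized fields are skipped, exactly one field is left, it starts at offset 0, it spans the whole struct, and only its type differs between source and destination. A minimal stand-alone sketch of that search (the `FieldInfo` type is hypothetical, not rustc's layout API):

```rust
// Hypothetical, simplified model of a struct layout: each field has an
// offset, a size, and a flag saying whether its type changes during the
// unsizing coercion (e.g. `[T; N]` -> `[T]` behind a pointer).
struct FieldInfo {
    offset: u64,
    size: u64,
    ty_changes: bool,
}

/// Returns the index of the single non-zero-sized field that the coercion
/// must recurse into, mirroring the asserts in `unsize_thin_ptr` above.
fn find_unsized_field(struct_size: u64, fields: &[FieldInfo]) -> usize {
    let mut result = None;
    for (i, f) in fields.iter().enumerate() {
        if f.size == 0 {
            continue; // ZSTs (e.g. PhantomData) are skipped entirely.
        }
        // The interesting field must start at offset 0 and cover the whole
        // struct, otherwise the coercion would disturb surrounding bytes.
        assert_eq!(f.offset, 0);
        assert_eq!(f.size, struct_size);
        assert!(f.ty_changes, "only the coerced field may differ");
        assert!(result.is_none(), "at most one non-ZST field allowed");
        result = Some(i);
    }
    result.expect("coercion source must have a non-ZST field")
}

fn main() {
    // e.g. struct Wrapper<T: ?Sized>(PhantomData<u8>, T);
    let fields = [
        FieldInfo { offset: 0, size: 0, ty_changes: false },
        FieldInfo { offset: 0, size: 16, ty_changes: true },
    ];
    assert_eq!(find_unsized_field(16, &fields), 1);
}
```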
-pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, - dst_align: Alignment, t: Ty<'tcx>) { - debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v)); - - if common::type_is_fat_ptr(cx.ccx, t) { - let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR); - let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA); - store_fat_ptr(cx, lladdr, llextra, dst, dst_align, t); - } else { - cx.store(from_immediate(cx, v), dst, dst_align.to_align()); - } -} - -pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>, - data: ValueRef, - extra: ValueRef, - dst: ValueRef, - dst_align: Alignment, - _ty: Ty<'tcx>) { - // FIXME: emit metadata - cx.store(data, get_dataptr(cx, dst), dst_align.to_align()); - cx.store(extra, get_meta(cx, dst), dst_align.to_align()); -} - -pub fn load_fat_ptr<'a, 'tcx>( - b: &Builder<'a, 'tcx>, src: ValueRef, alignment: Alignment, t: Ty<'tcx> -) -> (ValueRef, ValueRef) { - let ptr = get_dataptr(b, src); - let ptr = if t.is_region_ptr() || t.is_box() { - b.load_nonnull(ptr, alignment.to_align()) - } else { - b.load(ptr, alignment.to_align()) - }; - - let meta = get_meta(b, src); - let meta_ty = val_ty(meta); - // If the 'meta' field is a pointer, it's a vtable, so use load_nonnull - // instead - let meta = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer { - b.load_nonnull(meta, None) - } else { - b.load(meta, None) - }; - - (ptr, meta) -} - pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { if val_ty(val) == Type::i1(bcx.ccx) { bcx.zext(val, Type::i8(bcx.ccx)) @@ -481,50 +399,20 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { } } -pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef { - if ty.is_bool() { - bcx.trunc(val, Type::i1(bcx.ccx)) - } else { - val - } -} - -pub enum Lifetime { Start, End } - -impl Lifetime { - // If LLVM lifetime intrinsic support is enabled (i.e. optimizations - // on), and `ptr` is nonzero-sized, then extracts the size of `ptr` - // and the intrinsic for `lt` and passes them to `emit`, which is in - // charge of generating code to call the passed intrinsic on whatever - // block of generated code is targeted for the intrinsic. - // - // If LLVM lifetime intrinsic support is disabled (i.e. optimizations - // off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
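Note: the `from_immediate` helper kept above encodes the convention that a `bool` occupies a full byte in memory but is an `i1` as an SSA immediate; the matching truncation happens when loading. A toy model of that round-trip in plain Rust, standing in for the LLVM `zext`/`trunc` calls (illustrative only):

```rust
// Toy model of the i1/i8 convention: booleans live in memory as a full
// byte, but SSA immediates carry only one significant bit.
fn bool_to_memory(b: bool) -> u8 {
    // like `from_immediate`: zext i1 -> i8 before a store
    b as u8
}

fn bool_from_memory(byte: u8) -> bool {
    // like the load path: trunc i8 -> i1, only the low bit matters
    byte & 1 != 0
}

fn main() {
    assert_eq!(bool_to_memory(true), 1);
    assert!(bool_from_memory(bool_to_memory(true)));
    assert!(!bool_from_memory(bool_to_memory(false)));
}
```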
- pub fn call(self, b: &Builder, ptr: ValueRef) { - if b.ccx.sess().opts.optimize == config::OptLevel::No { - return; +pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef { + if let layout::Abi::Scalar(ref scalar) = layout.abi { + if scalar.is_bool() { + return bcx.trunc(val, Type::i1(bcx.ccx)); } - - let size = machine::llsize_of_alloc(b.ccx, val_ty(ptr).element_type()); - if size == 0 { - return; - } - - let lifetime_intrinsic = b.ccx.get_intrinsic(match self { - Lifetime::Start => "llvm.lifetime.start", - Lifetime::End => "llvm.lifetime.end" - }); - - let ptr = b.pointercast(ptr, Type::i8p(b.ccx)); - b.call(lifetime_intrinsic, &[C_u64(b.ccx, size), ptr], None); } + val } -pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, - dst: ValueRef, - src: ValueRef, - n_bytes: ValueRef, - align: u32) { +pub fn call_memcpy(b: &Builder, + dst: ValueRef, + src: ValueRef, + n_bytes: ValueRef, + align: Align) { let ccx = b.ccx; let ptr_width = &ccx.sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); @@ -532,7 +420,7 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, let src_ptr = b.pointercast(src, Type::i8p(ccx)); let dst_ptr = b.pointercast(dst, Type::i8p(ccx)); let size = b.intcast(n_bytes, ccx.isize_ty(), false); - let align = C_i32(ccx, align as i32); + let align = C_i32(ccx, align.abi() as i32); let volatile = C_bool(ccx, false); b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } @@ -541,18 +429,16 @@ pub fn memcpy_ty<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, dst: ValueRef, src: ValueRef, - t: Ty<'tcx>, - align: Option, + layout: TyLayout<'tcx>, + align: Option, ) { - let ccx = bcx.ccx; - - let size = ccx.size_of(t); + let size = layout.size.bytes(); if size == 0 { return; } - let align = align.unwrap_or_else(|| ccx.align_of(t)); - call_memcpy(bcx, dst, src, C_usize(ccx, size), align); + let align = align.unwrap_or(layout.align); + call_memcpy(bcx, dst, src, C_usize(bcx.ccx, size), align); } pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>, diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index b366d5579c3d108cc6f3c70d45e038c5ce4cd5b8..50e673bdbfdd77a068b33bb00a2525b7a1c84821 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -15,15 +15,16 @@ use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; use common::*; -use machine::llalign_of_pref; use type_::Type; use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; -use rustc::session::Session; +use rustc::ty::layout::{Align, Size}; +use rustc::session::{config, Session}; use std::borrow::Cow; use std::ffi::CString; +use std::ops::Range; use std::ptr; use syntax_pos::Span; @@ -487,7 +488,7 @@ pub fn not(&self, v: ValueRef) -> ValueRef { } } - pub fn alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { + pub fn alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef { let builder = Builder::with_ccx(self.ccx); builder.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -495,7 +496,7 @@ pub fn alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { builder.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option) -> ValueRef { + pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -505,9 +506,7 @@ pub fn 
dynamic_alloca(&self, ty: Type, name: &str, align: Option) -> ValueR llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), name.as_ptr()) }; - if let Some(align) = align { - llvm::LLVMSetAlignment(alloca, align as c_uint); - } + llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); alloca } } @@ -519,12 +518,12 @@ pub fn free(&self, ptr: ValueRef) { } } - pub fn load(&self, ptr: ValueRef, align: Option) -> ValueRef { + pub fn load(&self, ptr: ValueRef, align: Option) -> ValueRef { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); if let Some(align) = align { - llvm::LLVMSetAlignment(load, align as c_uint); + llvm::LLVMSetAlignment(load, align.abi() as c_uint); } load } @@ -539,49 +538,42 @@ pub fn volatile_load(&self, ptr: ValueRef) -> ValueRef { } } - pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef { + pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering, align: Align) -> ValueRef { self.count_insn("load.atomic"); unsafe { - let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); - let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order, - align as c_uint) + let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? + // However, 64-bit atomic loads on `i686-apple-darwin` appear to + // require `___atomic_load` with ABI-alignment, so it's staying. + llvm::LLVMSetAlignment(load, align.pref() as c_uint); + load } } - pub fn load_range_assert(&self, ptr: ValueRef, lo: u64, - hi: u64, signed: llvm::Bool, - align: Option) -> ValueRef { - let value = self.load(ptr, align); - + pub fn range_metadata(&self, load: ValueRef, range: Range) { unsafe { - let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr)); - let min = llvm::LLVMConstInt(t, lo, signed); - let max = llvm::LLVMConstInt(t, hi, signed); - - let v = [min, max]; + let llty = val_ty(load); + let v = [ + C_uint_big(llty, range.start), + C_uint_big(llty, range.end) + ]; - llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint, + llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, llvm::LLVMMDNodeInContext(self.ccx.llcx(), v.as_ptr(), v.len() as c_uint)); } - - value } - pub fn load_nonnull(&self, ptr: ValueRef, align: Option) -> ValueRef { - let value = self.load(ptr, align); + pub fn nonnull_metadata(&self, load: ValueRef) { unsafe { - llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint, + llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0)); } - - value } - pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option) -> ValueRef { + pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option) -> ValueRef { debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); assert!(!self.llbuilder.is_null()); self.count_insn("store"); @@ -589,7 +581,7 @@ pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option) -> ValueRe unsafe { let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); if let Some(align) = align { - llvm::LLVMSetAlignment(store, align as c_uint); + llvm::LLVMSetAlignment(store, align.abi() as c_uint); } store } @@ -607,14 +599,16 @@ pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { } } - pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { + pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, + order: AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", 
Value(val), Value(ptr)); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); unsafe { - let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); - let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint); + let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? + // Also see `atomic_load` for more context. + llvm::LLVMSetAlignment(store, align.pref() as c_uint); } } @@ -626,25 +620,6 @@ pub fn gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef { } } - // Simple wrapper around GEP that takes an array of ints and wraps them - // in C_i32() - #[inline] - pub fn gepi(&self, base: ValueRef, ixs: &[usize]) -> ValueRef { - // Small vector optimization. This should catch 100% of the cases that - // we care about. - if ixs.len() < 16 { - let mut small_vec = [ C_i32(self.ccx, 0); 16 ]; - for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs) { - *small_vec_e = C_i32(self.ccx, ix as i32); - } - self.inbounds_gep(base, &small_vec[..ixs.len()]) - } else { - let v = ixs.iter().map(|i| C_i32(self.ccx, *i as i32)).collect::>(); - self.count_insn("gepi"); - self.inbounds_gep(base, &v) - } - } - pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef { self.count_insn("inboundsgep"); unsafe { @@ -653,8 +628,9 @@ pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef { } } - pub fn struct_gep(&self, ptr: ValueRef, idx: usize) -> ValueRef { + pub fn struct_gep(&self, ptr: ValueRef, idx: u64) -> ValueRef { self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) } @@ -960,16 +936,18 @@ pub fn vector_splat(&self, num_elts: usize, elt: ValueRef) -> ValueRef { } } - pub fn extract_value(&self, agg_val: ValueRef, idx: usize) -> ValueRef { + pub fn extract_value(&self, agg_val: ValueRef, idx: u64) -> ValueRef { self.count_insn("extractvalue"); + assert_eq!(idx as c_uint as u64, idx); unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname()) } } pub fn insert_value(&self, agg_val: ValueRef, elt: ValueRef, - idx: usize) -> ValueRef { + idx: u64) -> ValueRef { self.count_insn("insertvalue"); + assert_eq!(idx as c_uint as u64, idx); unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, noname()) @@ -1151,14 +1129,12 @@ pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { pub fn add_case(&self, s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) { unsafe { - if llvm::LLVMIsUndef(s) == llvm::True { return; } llvm::LLVMAddCase(s, on_val, dest) } } pub fn add_incoming_to_phi(&self, phi: ValueRef, val: ValueRef, bb: BasicBlockRef) { unsafe { - if llvm::LLVMIsUndef(phi) == llvm::True { return; } llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } @@ -1233,4 +1209,36 @@ fn check_call<'b>(&self, return Cow::Owned(casted_args); } + + pub fn lifetime_start(&self, ptr: ValueRef, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); + } + + pub fn lifetime_end(&self, ptr: ValueRef, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); + } + + /// If LLVM lifetime intrinsic support is enabled (i.e. 
optimizations + /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` + /// and the intrinsic for `lt` and passes them to `emit`, which is in + /// charge of generating code to call the passed intrinsic on whatever + /// block of generated code is targeted for the intrinsic. + /// + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations + /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). + fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ValueRef, size: Size) { + if self.ccx.sess().opts.optimize == config::OptLevel::No { + return; + } + + let size = size.bytes(); + if size == 0 { + return; + } + + let lifetime_intrinsic = self.ccx.get_intrinsic(intrinsic); + + let ptr = self.pointercast(ptr, Type::i8p(self.ccx)); + self.call(lifetime_intrinsic, &[C_u64(self.ccx, size), ptr], None); + } } diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs index bf842e6358f87860a5835bbfd11865419e750779..d5f341f9685833dd987b2e0b1d94afed5b8ba875 100644 --- a/src/librustc_trans/cabi_aarch64.rs +++ b/src/librustc_trans/cabi_aarch64.rs @@ -14,7 +14,7 @@ fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) -> Option { arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { - let size = arg.layout.size(ccx); + let size = arg.layout.size; // Ensure we have at most four uniquely addressable members.
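Note: the comment above restates the AArch64 homogeneous-aggregate rule: the aggregate may occupy at most four uniquely addressable copies of its unit type, otherwise it is passed indirectly. A stand-alone version of that size check, using plain byte counts rather than rustc's `Size` (illustrative only):

```rust
// A homogeneous aggregate built from `unit_size`-byte members is passed
// in registers only if the whole aggregate fits in `max_members` units.
fn fits_in_hfa_regs(aggregate_size: u64, unit_size: u64, max_members: u64) -> bool {
    aggregate_size <= unit_size.checked_mul(max_members).unwrap()
}

fn main() {
    // [f64; 4] = 32 bytes of f64 units: still a homogeneous aggregate.
    assert!(fits_in_hfa_regs(32, 8, 4));
    // [f32; 5] = 20 bytes of f32 units: five members, passed indirectly.
    assert!(!fits_in_hfa_regs(20, 4, 4));
}
```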
if size > unit.size.checked_mul(4, ccx).unwrap() { @@ -47,12 +47,12 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc if vfp { if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } } - let size = ret.layout.size(ccx); + let size = ret.layout.size; let bits = size.bits(); if bits <= 32 { let unit = if bits <= 8 { @@ -62,13 +62,13 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } else { Reg::i32() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); return; } - ret.make_indirect(ccx); + ret.make_indirect(); } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) { @@ -79,14 +79,14 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc if vfp { if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } } - let align = arg.layout.align(ccx).abi(); - let total = arg.layout.size(ccx); - arg.cast_to(ccx, Uniform { + let align = arg.layout.align.abi(); + let total = arg.layout.size; + arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, total }); diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs index 6fcd3ed581d27c85f2914cf200fce0c85eac108f..1664251cf897b0edd10b84bc6787814714dc695c 100644 --- a/src/librustc_trans/cabi_asmjs.rs +++ b/src/librustc_trans/cabi_asmjs.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{FnType, ArgType, ArgAttribute, LayoutExt, Uniform}; +use abi::{FnType, ArgType, LayoutExt, Uniform}; use context::CrateContext; // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128 @@ -19,9 +19,9 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { if ret.layout.is_aggregate() { if let Some(unit) = ret.layout.homogeneous_aggregate(ccx) { - let size = ret.layout.size(ccx); + let size = ret.layout.size; if unit.size == size { - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -29,14 +29,13 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } } - ret.make_indirect(ccx); + ret.make_indirect(); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { +fn classify_arg_ty(arg: &mut ArgType) { if arg.layout.is_aggregate() { - arg.make_indirect(ccx); - arg.attrs.set(ArgAttribute::ByVal); + arg.make_indirect_byval(); } } @@ -47,6 +46,6 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_hexagon.rs b/src/librustc_trans/cabi_hexagon.rs index 1acda72675c317a0426697f91302a162b36fd1d4..7e7e483fea0c0cd340cdddf514a0303c6b61e6ec 100644 --- a/src/librustc_trans/cabi_hexagon.rs +++ b/src/librustc_trans/cabi_hexagon.rs @@ -11,33 +11,32 @@ #![allow(non_upper_case_globals)] use abi::{FnType, ArgType, LayoutExt}; -use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 { + ret.make_indirect(); } else { 
ret.extend_integer_width_to(32); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut ArgType) { + if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 { + arg.make_indirect(); } else { arg.extend_integer_width_to(32); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs index b7b60859d4a048b9f53e0952659bcc7c133948ee..fe61670a1086f93157f8f8c02ee6e835f37bfbc0 100644 --- a/src/librustc_trans/cabi_mips.rs +++ b/src/librustc_trans/cabi_mips.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { - ret.make_indirect(ccx); + ret.make_indirect(); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { - let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs index dff75e628de10270fa83b09c4c6da22281ea21d5..16d0cfe072d5772d2070df703fbc7ff6dad219fe 100644 --- a/src/librustc_trans/cabi_mips64.rs +++ b/src/librustc_trans/cabi_mips64.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
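Note: the MIPS classifier above keeps a running stack offset: the argument's alignment is clamped between the i32 and i64 alignments, a pad register is emitted if the current offset is misaligned, and the offset then advances by the aligned size. A sketch of that bookkeeping with plain integers instead of `Size`/`Align`, assuming power-of-two alignments (illustrative only):

```rust
// Round `value` up to the next multiple of `align` (a power of two).
fn align_up(value: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    (value + align - 1) & !(align - 1)
}

/// Advances the running argument offset and reports whether an i32 pad
/// register is needed first (i.e. the current offset is misaligned).
fn bump_offset(offset: &mut u64, size: u64, align: u64) -> bool {
    let needs_pad = *offset % align != 0;
    *offset = align_up(*offset, align) + align_up(size, align);
    needs_pad
}

fn main() {
    let mut offset = 4; // e.g. one i32 already passed
    // An 8-byte aggregate with 8-byte alignment: offset 4 is misaligned,
    // so a pad register goes in first and the offset jumps to 16.
    assert!(bump_offset(&mut offset, 8, 8));
    assert_eq!(offset, 16);
}
```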
-use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); } else { - ret.make_indirect(ccx); + ret.make_indirect(); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { - let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i64(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i64()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i64()); } } else { arg.extend_integer_width_to(64); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 8 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_msp430.rs b/src/librustc_trans/cabi_msp430.rs index 546bb5ad9b44ef39b140c81dea2d817d8b186aeb..d270886a19cd11c835da1cccde7230d153ed6f3f 100644 --- a/src/librustc_trans/cabi_msp430.rs +++ b/src/librustc_trans/cabi_msp430.rs @@ -12,7 +12,6 @@ // http://www.ti.com/lit/an/slaa534/slaa534.pdf use abi::{ArgType, FnType, LayoutExt}; -use context::CrateContext; // 3.5 Structures or Unions Passed and Returned by Reference // @@ -20,31 +19,31 @@ // returned by reference. To pass a structure or union by reference, the caller // places its address in the appropriate location: either in a register or on // the stack, according to its position in the argument list. 
(..)" -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 { + ret.make_indirect(); } else { ret.extend_integer_width_to(16); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut ArgType) { + if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 { + arg.make_indirect(); } else { arg.extend_integer_width_to(16); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_nvptx.rs b/src/librustc_trans/cabi_nvptx.rs index 3873752b25470638b2456a276a4b784499a6713e..69cfc690a9f9d311888547718118789a8868b391 100644 --- a/src/librustc_trans/cabi_nvptx.rs +++ b/src/librustc_trans/cabi_nvptx.rs @@ -12,33 +12,32 @@ // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability use abi::{ArgType, FnType, LayoutExt}; -use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 { + ret.make_indirect(); } else { ret.extend_integer_width_to(32); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut ArgType) { + if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 { + arg.make_indirect(); } else { arg.extend_integer_width_to(32); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_nvptx64.rs b/src/librustc_trans/cabi_nvptx64.rs index 24bf4920c16c1277dd50435442a0ffbd9cef9e3a..4d76c156038001569228bc8ca9770de4b65dc62d 100644 --- a/src/librustc_trans/cabi_nvptx64.rs +++ b/src/librustc_trans/cabi_nvptx64.rs @@ -12,33 +12,32 @@ // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability use abi::{ArgType, FnType, LayoutExt}; -use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 { - ret.make_indirect(ccx); +fn classify_ret_ty(ret: &mut ArgType) { + if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 { + ret.make_indirect(); } else { ret.extend_integer_width_to(64); } } -fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 { - arg.make_indirect(ccx); +fn classify_arg_ty(arg: &mut ArgType) { + if 
arg.layout.is_aggregate() && arg.layout.size.bits() > 64 { + arg.make_indirect(); } else { arg.extend_integer_width_to(64); } } -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { +pub fn compute_abi_info(fty: &mut FnType) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { if arg.is_ignore() { continue; } - classify_arg_ty(ccx, arg); + classify_arg_ty(arg); } } diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs index f951ac76391f6614b0936f013bbf12a3bd9d87e8..c3c8c745e3a61c175b96b2abc8cc2511b6c872fe 100644 --- a/src/librustc_trans/cabi_powerpc.rs +++ b/src/librustc_trans/cabi_powerpc.rs @@ -8,46 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{align_up_to, FnType, ArgType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use std::cmp; +use rustc::ty::layout::Size; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { - ret.make_indirect(ccx); + ret.make_indirect(); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { - let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs index fb5472eb6ae1fa3bb154efa6583329ca08945d7e..2206a4fa00cc3af1ff41b49af4c9b142199604d5 100644 --- a/src/librustc_trans/cabi_powerpc64.rs +++ b/src/librustc_trans/cabi_powerpc64.rs @@ -28,25 +28,23 @@ fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, abi: ABI) -> Option { arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { - let size = arg.layout.size(ccx); - // ELFv1 only passes one-member aggregates transparently. // ELFv2 passes up to eight uniquely addressable members. 
- if (abi == ELFv1 && size > unit.size) - || size > unit.size.checked_mul(8, ccx).unwrap() { + if (abi == ELFv1 && arg.layout.size > unit.size) + || arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() { return None; } let valid_unit = match unit.kind { RegKind::Integer => false, RegKind::Float => true, - RegKind::Vector => size.bits() == 128 + RegKind::Vector => arg.layout.size.bits() == 128 }; if valid_unit { Some(Uniform { unit, - total: size + total: arg.layout.size }) } else { None @@ -62,16 +60,16 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc // The ELFv1 ABI doesn't return aggregates in registers if abi == ELFv1 { - ret.make_indirect(ccx); + ret.make_indirect(); return; } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret, abi) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } - let size = ret.layout.size(ccx); + let size = ret.layout.size; let bits = size.bits(); if bits <= 128 { let unit = if bits <= 8 { @@ -84,14 +82,14 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); return; } - ret.make_indirect(ccx); + ret.make_indirect(); } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) { @@ -101,11 +99,11 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg, abi) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } - let size = arg.layout.size(ccx); + let size = arg.layout.size; let (unit, total) = match abi { ELFv1 => { // In ELFv1, aggregates smaller than a doubleword should appear in @@ -124,7 +122,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc }, }; - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit, total }); diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index fedebea3f4c998ea177607167a3a83b2748ff0cb..9fb460043ae81d55490f25a3edae5f0054f57d65 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -14,23 +14,27 @@ use abi::{FnType, ArgType, LayoutExt, Reg}; use context::CrateContext; -use rustc::ty::layout::{self, Layout, TyLayout}; +use rustc::ty::layout::{self, TyLayout}; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { - if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 { +fn classify_ret_ty(ret: &mut ArgType) { + if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 { ret.extend_integer_width_to(64); } else { - ret.make_indirect(ccx); + ret.make_indirect(); } } fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> bool { - match *layout { - Layout::Scalar { value: layout::F32, .. } | - Layout::Scalar { value: layout::F64, .. } => true, - Layout::Univariant { .. } => { - if layout.field_count() == 1 { + match layout.abi { + layout::Abi::Scalar(ref scalar) => { + match scalar.value { + layout::F32 | layout::F64 => true, + _ => false + } + } + layout::Abi::Aggregate { .. 
} => { + if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { false @@ -41,32 +45,31 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { - let size = arg.layout.size(ccx); - if !arg.layout.is_aggregate() && size.bits() <= 64 { + if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 { arg.extend_integer_width_to(64); return; } if is_single_fp_element(ccx, arg.layout) { - match size.bytes() { - 4 => arg.cast_to(ccx, Reg::f32()), - 8 => arg.cast_to(ccx, Reg::f64()), - _ => arg.make_indirect(ccx) + match arg.layout.size.bytes() { + 4 => arg.cast_to(Reg::f32()), + 8 => arg.cast_to(Reg::f64()), + _ => arg.make_indirect() } } else { - match size.bytes() { - 1 => arg.cast_to(ccx, Reg::i8()), - 2 => arg.cast_to(ccx, Reg::i16()), - 4 => arg.cast_to(ccx, Reg::i32()), - 8 => arg.cast_to(ccx, Reg::i64()), - _ => arg.make_indirect(ccx) + match arg.layout.size.bytes() { + 1 => arg.cast_to(Reg::i8()), + 2 => arg.cast_to(Reg::i16()), + 4 => arg.cast_to(Reg::i32()), + 8 => arg.cast_to(Reg::i64()), + _ => arg.make_indirect() } } } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(&mut fty.ret); } for arg in &mut fty.args { diff --git a/src/librustc_trans/cabi_sparc.rs b/src/librustc_trans/cabi_sparc.rs index c17901e1adebc113aace587a507e7cf3044cc122..fe61670a1086f93157f8f8c02ee6e835f37bfbc0 100644 --- a/src/librustc_trans/cabi_sparc.rs +++ b/src/librustc_trans/cabi_sparc.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
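Note: both the s390x code above and the x86 code further down use the same `is_single_fp_element` idea: keep unwrapping aggregates that have a single field at offset 0 until a scalar is reached, and accept it only if that scalar is `f32` or `f64`. A simplified, self-contained model of that recursion (the `Layout` enum here is a stand-in, not rustc's `TyLayout`):

```rust
// Stand-in layout description: a scalar, or an aggregate of (offset, field).
enum Layout {
    Scalar { is_float: bool },
    Aggregate { fields: Vec<(u64, Layout)> },
}

fn is_single_fp_element(layout: &Layout) -> bool {
    match layout {
        Layout::Scalar { is_float } => *is_float,
        Layout::Aggregate { fields } => {
            // Only a lone field at offset 0 makes the wrapper transparent
            // for ABI purposes; anything else disqualifies it.
            match fields.as_slice() {
                [(0, inner)] => is_single_fp_element(inner),
                _ => false,
            }
        }
    }
}

fn main() {
    // struct Outer(Inner); struct Inner(f64); -> qualifies
    let nested = Layout::Aggregate {
        fields: vec![(0, Layout::Aggregate {
            fields: vec![(0, Layout::Scalar { is_float: true })],
        })],
    };
    assert!(is_single_fp_element(&nested));

    // struct Pair(f32, f32) has two fields, so it does not qualify.
    let pair = Layout::Aggregate {
        fields: vec![(0, Layout::Scalar { is_float: true }),
                     (4, Layout::Scalar { is_float: true })],
    };
    assert!(!is_single_fp_element(&pair));
}
```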
-use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { - ret.make_indirect(ccx); + ret.make_indirect(); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { - let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; + let size = arg.layout.size; + let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { - arg.extend_integer_width_to(32) + arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_sparc64.rs b/src/librustc_trans/cabi_sparc64.rs index 8383007550e1e7ef987f2b714bd8866c158a583d..7c52e27fa67d1b2d2310e07f0090008ac0c82f61 100644 --- a/src/librustc_trans/cabi_sparc64.rs +++ b/src/librustc_trans/cabi_sparc64.rs @@ -16,23 +16,21 @@ fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) -> Option { arg.layout.homogeneous_aggregate(ccx).and_then(|unit| { - let size = arg.layout.size(ccx); - // Ensure we have at most eight uniquely addressable members. 
- if size > unit.size.checked_mul(8, ccx).unwrap() { + if arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() { return None; } let valid_unit = match unit.kind { RegKind::Integer => false, RegKind::Float => true, - RegKind::Vector => size.bits() == 128 + RegKind::Vector => arg.layout.size.bits() == 128 }; if valid_unit { Some(Uniform { unit, - total: size + total: arg.layout.size }) } else { None @@ -47,10 +45,10 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } - let size = ret.layout.size(ccx); + let size = ret.layout.size; let bits = size.bits(); if bits <= 128 { let unit = if bits <= 8 { @@ -63,7 +61,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -71,7 +69,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } // don't return aggregates in registers - ret.make_indirect(ccx); + ret.make_indirect(); } fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) { @@ -81,12 +79,12 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } - let total = arg.layout.size(ccx); - arg.cast_to(ccx, Uniform { + let total = arg.layout.size; + arg.cast_to(Uniform { unit: Reg::i64(), total }); diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 49634d6e78ce9a589eb68d9e68c5833aa7558bbc..6fd0140c39901d7ce6ee4f7df3c0cb530e17a7d9 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -8,10 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind}; +use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind}; use common::CrateContext; -use rustc::ty::layout::{self, Layout, TyLayout}; +use rustc::ty::layout::{self, TyLayout}; #[derive(PartialEq)] pub enum Flavor { @@ -21,11 +21,15 @@ pub enum Flavor { fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> bool { - match *layout { - Layout::Scalar { value: layout::F32, .. } | - Layout::Scalar { value: layout::F64, .. } => true, - Layout::Univariant { .. } => { - if layout.field_count() == 1 { + match layout.abi { + layout::Abi::Scalar(ref scalar) => { + match scalar.value { + layout::F32 | layout::F64 => true, + _ => false + } + } + layout::Abi::Aggregate { .. } => { + if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 { is_single_fp_element(ccx, layout.field(ccx, 0)) } else { false @@ -50,27 +54,25 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let t = &ccx.sess().target.target; if t.options.is_like_osx || t.options.is_like_windows || t.options.is_like_openbsd { - let size = fty.ret.layout.size(ccx); - // According to Clang, everyone but MSVC returns single-element // float aggregates directly in a floating-point register. 
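Note: per the comment above, on the OSX/Windows/OpenBSD-like targets every compiler except MSVC returns a single-element float aggregate directly in a floating-point register; other small aggregates are cast to a same-sized integer, and anything larger goes through memory. A rough sketch of that decision, with a hypothetical `RetKind` standing in for the `cast_to`/`make_indirect` calls that follow in the hunk below:

```rust
#[derive(Debug, PartialEq)]
enum RetKind {
    InFpReg(u64),  // returned in a floating-point register of this many bytes
    InIntReg(u64), // returned as a same-sized integer in register(s)
    Indirect,      // returned through a hidden out-pointer (sret)
}

fn classify_x86_aggregate_ret(size_bytes: u64, single_fp_element: bool) -> RetKind {
    if single_fp_element {
        match size_bytes {
            4 | 8 => RetKind::InFpReg(size_bytes),
            _ => RetKind::Indirect,
        }
    } else {
        match size_bytes {
            1 | 2 | 4 | 8 => RetKind::InIntReg(size_bytes),
            _ => RetKind::Indirect,
        }
    }
}

fn main() {
    // struct Meters(f64) comes back in a floating-point register...
    assert_eq!(classify_x86_aggregate_ret(8, true), RetKind::InFpReg(8));
    // ...while struct Pair(i32, i32) comes back as a 64-bit integer,
    // and anything bigger is returned through memory.
    assert_eq!(classify_x86_aggregate_ret(8, false), RetKind::InIntReg(8));
    assert_eq!(classify_x86_aggregate_ret(12, false), RetKind::Indirect);
}
```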
if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) { - match size.bytes() { - 4 => fty.ret.cast_to(ccx, Reg::f32()), - 8 => fty.ret.cast_to(ccx, Reg::f64()), - _ => fty.ret.make_indirect(ccx) + match fty.ret.layout.size.bytes() { + 4 => fty.ret.cast_to(Reg::f32()), + 8 => fty.ret.cast_to(Reg::f64()), + _ => fty.ret.make_indirect() } } else { - match size.bytes() { - 1 => fty.ret.cast_to(ccx, Reg::i8()), - 2 => fty.ret.cast_to(ccx, Reg::i16()), - 4 => fty.ret.cast_to(ccx, Reg::i32()), - 8 => fty.ret.cast_to(ccx, Reg::i64()), - _ => fty.ret.make_indirect(ccx) + match fty.ret.layout.size.bytes() { + 1 => fty.ret.cast_to(Reg::i8()), + 2 => fty.ret.cast_to(Reg::i16()), + 4 => fty.ret.cast_to(Reg::i32()), + 8 => fty.ret.cast_to(Reg::i64()), + _ => fty.ret.make_indirect() } } } else { - fty.ret.make_indirect(ccx); + fty.ret.make_indirect(); } } else { fty.ret.extend_integer_width_to(32); @@ -80,8 +82,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, for arg in &mut fty.args { if arg.is_ignore() { continue; } if arg.layout.is_aggregate() { - arg.make_indirect(ccx); - arg.attrs.set(ArgAttribute::ByVal); + arg.make_indirect_byval(); } else { arg.extend_integer_width_to(32); } @@ -100,17 +101,24 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let mut free_regs = 2; for arg in &mut fty.args { - if arg.is_ignore() || arg.is_indirect() { continue; } + let attrs = match arg.mode { + PassMode::Ignore | + PassMode::Indirect(_) => continue, + PassMode::Direct(ref mut attrs) => attrs, + PassMode::Pair(..) | + PassMode::Cast(_) => { + bug!("x86 shouldn't be passing arguments by {:?}", arg.mode) + } + }; // At this point we know this must be a primitive of sorts. let unit = arg.layout.homogeneous_aggregate(ccx).unwrap(); - let size = arg.layout.size(ccx); - assert_eq!(unit.size, size); + assert_eq!(unit.size, arg.layout.size); if unit.kind == RegKind::Float { continue; } - let size_in_regs = (size.bits() + 31) / 32; + let size_in_regs = (arg.layout.size.bits() + 31) / 32; if size_in_regs == 0 { continue; @@ -122,8 +130,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, free_regs -= size_in_regs; - if size.bits() <= 32 && unit.kind == RegKind::Integer { - arg.attrs.set(ArgAttribute::InReg); + if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer { + attrs.set(ArgAttribute::InReg); } if free_regs == 0 { diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index a814f458e12aa6dcaf762050b22f3dc91e7e5702..81eb362ca46dc776281e3a7d62cd763dd12d2bd3 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -11,10 +11,10 @@ // The classification code for the x86_64 ABI is taken from the clay language // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp -use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind}; +use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind}; use context::CrateContext; -use rustc::ty::layout::{self, Layout, TyLayout, Size}; +use rustc::ty::layout::{self, TyLayout, Size}; #[derive(Clone, Copy, PartialEq, Debug)] enum Class { @@ -34,9 +34,9 @@ enum Class { fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) -> Result<[Class; MAX_EIGHTBYTES], Memory> { fn unify(cls: &mut [Class], - off: u64, + off: Size, c: Class) { - let i = (off / 8) as usize; + let i = (off.bytes() / 8) as usize; let to_write = match (cls[i], c) { (Class::None, _) => c, (_, Class::None) => return, @@ -55,20 
+55,21 @@ fn unify(cls: &mut [Class], fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>, cls: &mut [Class], - off: u64) + off: Size) -> Result<(), Memory> { - if off % layout.align(ccx).abi() != 0 { - if layout.size(ccx).bytes() > 0 { + if !off.is_abi_aligned(layout.align) { + if !layout.is_zst() { return Err(Memory); } return Ok(()); } - match *layout { - Layout::Scalar { value, .. } | - Layout::RawNullablePointer { value, .. } => { - let reg = match value { - layout::Int(_) | + match layout.abi { + layout::Abi::Uninhabited => {} + + layout::Abi::Scalar(ref scalar) => { + let reg = match scalar.value { + layout::Int(..) | layout::Pointer => Class::Int, layout::F32 | layout::F64 => Class::Sse @@ -76,59 +77,43 @@ fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, unify(cls, off, reg); } - Layout::CEnum { .. } => { - unify(cls, off, Class::Int); - } - - Layout::Vector { element, count } => { + layout::Abi::Vector => { unify(cls, off, Class::Sse); // everything after the first one is the upper // half of a register. - let eltsz = element.size(ccx).bytes(); - for i in 1..count { - unify(cls, off + i * eltsz, Class::SseUp); + for i in 1..layout.fields.count() { + let field_off = off + layout.fields.offset(i); + unify(cls, field_off, Class::SseUp); } } - Layout::Array { count, .. } => { - if count > 0 { - let elt = layout.field(ccx, 0); - let eltsz = elt.size(ccx).bytes(); - for i in 0..count { - classify(ccx, elt, cls, off + i * eltsz)?; + layout::Abi::ScalarPair(..) | + layout::Abi::Aggregate { .. } => { + match layout.variants { + layout::Variants::Single { .. } => { + for i in 0..layout.fields.count() { + let field_off = off + layout.fields.offset(i); + classify(ccx, layout.field(ccx, i), cls, field_off)?; + } } + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => return Err(Memory), } } - Layout::Univariant { ref variant, .. } => { - for i in 0..layout.field_count() { - let field_off = off + variant.offsets[i].bytes(); - classify(ccx, layout.field(ccx, i), cls, field_off)?; - } - } - - Layout::UntaggedUnion { .. } => { - for i in 0..layout.field_count() { - classify(ccx, layout.field(ccx, i), cls, off)?; - } - } - - Layout::FatPointer { .. } | - Layout::General { .. } | - Layout::StructWrappedNullablePointer { .. 
} => return Err(Memory) } Ok(()) } - let n = ((arg.layout.size(ccx).bytes() + 7) / 8) as usize; + let n = ((arg.layout.size.bytes() + 7) / 8) as usize; if n > MAX_EIGHTBYTES { return Err(Memory); } let mut cls = [Class::None; MAX_EIGHTBYTES]; - classify(ccx, arg.layout, &mut cls, 0)?; + classify(ccx, arg.layout, &mut cls, Size::from_bytes(0))?; if n > 2 { if cls[0] != Class::Sse { return Err(Memory); @@ -153,7 +138,7 @@ fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, Ok(cls) } -fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { +fn reg_component(cls: &[Class], i: &mut usize, size: Size) -> Option { if *i >= cls.len() { return None; } @@ -162,7 +147,7 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { Class::None => None, Class::Int => { *i += 1; - Some(match size { + Some(match size.bytes() { 1 => Reg::i8(), 2 => Reg::i16(), 3 | @@ -174,14 +159,14 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { let vec_len = 1 + cls[*i+1..].iter().take_while(|&&c| c == Class::SseUp).count(); *i += vec_len; Some(if vec_len == 1 { - match size { + match size.bytes() { 4 => Reg::f32(), _ => Reg::f64() } } else { Reg { kind: RegKind::Vector, - size: Size::from_bytes(vec_len as u64 * 8) + size: Size::from_bytes(8) * (vec_len as u64) } }) } @@ -189,17 +174,17 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option { } } -fn cast_target(cls: &[Class], size: u64) -> CastTarget { +fn cast_target(cls: &[Class], size: Size) -> CastTarget { let mut i = 0; let lo = reg_component(cls, &mut i, size).unwrap(); - let offset = i as u64 * 8; + let offset = Size::from_bytes(8) * (i as u64); let target = if size <= offset { CastTarget::from(lo) } else { let hi = reg_component(cls, &mut i, size - offset).unwrap(); CastTarget::Pair(lo, hi) }; - assert_eq!(reg_component(cls, &mut i, 0), None); + assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None); target } @@ -229,11 +214,11 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType }; if in_mem { - arg.make_indirect(ccx); if is_arg { - arg.attrs.set(ArgAttribute::ByVal); + arg.make_indirect_byval(); } else { // `sret` parameter thus one less integer register available + arg.make_indirect(); int_regs -= 1; } } else { @@ -242,8 +227,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType sse_regs -= needed_sse; if arg.layout.is_aggregate() { - let size = arg.layout.size(ccx).bytes(); - arg.cast_to(ccx, cast_target(cls.as_ref().unwrap(), size)) + let size = arg.layout.size; + arg.cast_to(cast_target(cls.as_ref().unwrap(), size)) } else { arg.extend_integer_width_to(32); } diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index 39e728d4e4f9b987bf4252031dbb2567b8665dbd..473c00120a740c6b2d3e3e4068a975f36c373ce1 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -8,32 +8,36 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
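Note: the x86_64 code above splits an argument into 8-byte chunks ("eightbytes"), gives each chunk a class, and merges the classes of the scalars that land in it, with integers dominating floats; arguments needing more than `MAX_EIGHTBYTES` chunks go to memory. A greatly simplified sketch of that pass over pre-flattened scalar fields (names are illustrative; the SseUp handling and the enum-variant checks are omitted):

```rust
#[derive(Clone, Copy, PartialEq, Debug)]
enum Class {
    None,
    Int,
    Sse,
}

struct Memory; // argument must be passed on the stack

// `scalars` lists each scalar leaf as (byte offset, class).
fn classify(size: u64, scalars: &[(u64, Class)]) -> Result<Vec<Class>, Memory> {
    let n = ((size + 7) / 8) as usize;
    if n > 8 {
        return Err(Memory); // more than 8 eightbytes always goes in memory
    }
    let mut cls = vec![Class::None; n];
    for &(offset, c) in scalars {
        let i = (offset / 8) as usize;
        // Merge the incoming class into the chunk: integers win over floats.
        cls[i] = match (cls[i], c) {
            (Class::None, c) => c,
            (old, Class::None) => old,
            (Class::Int, _) | (_, Class::Int) => Class::Int,
            _ => Class::Sse,
        };
    }
    Ok(cls)
}

fn main() {
    // struct S { a: f64, b: i32 }: first eightbyte is SSE, second is INTEGER.
    let cls = classify(16, &[(0, Class::Sse), (8, Class::Int)]).unwrap();
    assert_eq!(cls, vec![Class::Sse, Class::Int]);
}
```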
-use abi::{ArgType, FnType, LayoutExt, Reg}; -use common::CrateContext; +use abi::{ArgType, FnType, Reg}; -use rustc::ty::layout::Layout; +use rustc::ty::layout; // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx -pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { - let fixup = |a: &mut ArgType<'tcx>| { - let size = a.layout.size(ccx); - if a.layout.is_aggregate() { - match size.bits() { - 8 => a.cast_to(ccx, Reg::i8()), - 16 => a.cast_to(ccx, Reg::i16()), - 32 => a.cast_to(ccx, Reg::i32()), - 64 => a.cast_to(ccx, Reg::i64()), - _ => a.make_indirect(ccx) - }; - } else { - if let Layout::Vector { .. } = *a.layout { +pub fn compute_abi_info(fty: &mut FnType) { + let fixup = |a: &mut ArgType| { + match a.layout.abi { + layout::Abi::Uninhabited => {} + layout::Abi::ScalarPair(..) | + layout::Abi::Aggregate { .. } => { + match a.layout.size.bits() { + 8 => a.cast_to(Reg::i8()), + 16 => a.cast_to(Reg::i16()), + 32 => a.cast_to(Reg::i32()), + 64 => a.cast_to(Reg::i64()), + _ => a.make_indirect() + } + } + layout::Abi::Vector => { // FIXME(eddyb) there should be a size cap here // (probably what clang calls "illegal vectors"). - } else if size.bytes() > 8 { - a.make_indirect(ccx); - } else { - a.extend_integer_width_to(32); + } + layout::Abi::Scalar(_) => { + if a.layout.size.bytes() > 8 { + a.make_indirect(); + } else { + a.extend_integer_width_to(32); + } } } }; diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs index b515c9420bf36d18812dad1c4208b88bad65ecbb..4afeac2e8f5895380d6c20c7ed22e824a242fc28 100644 --- a/src/librustc_trans/callee.rs +++ b/src/librustc_trans/callee.rs @@ -20,12 +20,14 @@ use declare; use llvm::{self, ValueRef}; use monomorphize::Instance; +use type_of::LayoutLlvmExt; + use rustc::hir::def_id::DefId; use rustc::ty::{self, TypeFoldable}; +use rustc::ty::layout::LayoutOf; use rustc::traits; use rustc::ty::subst::Substs; use rustc_back::PanicStrategy; -use type_of; /// Translates a reference to a fn/method item, monomorphizing and /// inlining as it goes. @@ -56,7 +58,7 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Create a fn pointer with the substituted signature. let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(ccx, fn_ty)); - let llptrty = type_of::type_of(ccx, fn_ptr_ty); + let llptrty = ccx.layout_of(fn_ptr_ty).llvm_type(ccx); let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) { // This is subtle and surprising, but sometimes we have to bitcast diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index e3856cabcf910c77b3123462928f5040d34f150d..7bd8a0c81ee3438d6c3eac4b9c30e315709a8bef 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -18,17 +18,17 @@ use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use rustc::middle::lang_items::LangItem; +use abi; use base; use builder::Builder; use consts; use declare; -use machine; -use monomorphize; use type_::Type; +use type_of::LayoutLlvmExt; use value::Value; use rustc::traits; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; @@ -41,105 +41,6 @@ pub use context::{CrateContext, SharedCrateContext}; -pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - if let Layout::FatPointer { .. 
} = *ccx.layout_of(ty) { - true - } else { - false - } -} - -pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - let layout = ccx.layout_of(ty); - match *layout { - Layout::CEnum { .. } | - Layout::Scalar { .. } | - Layout::Vector { .. } => true, - - Layout::FatPointer { .. } => false, - - Layout::Array { .. } | - Layout::Univariant { .. } | - Layout::General { .. } | - Layout::UntaggedUnion { .. } | - Layout::RawNullablePointer { .. } | - Layout::StructWrappedNullablePointer { .. } => { - !layout.is_unsized() && layout.size(ccx).bytes() == 0 - } - } -} - -/// Returns Some([a, b]) if the type has a pair of fields with types a and b. -pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - -> Option<[Ty<'tcx>; 2]> { - match ty.sty { - ty::TyAdt(adt, substs) => { - assert_eq!(adt.variants.len(), 1); - let fields = &adt.variants[0].fields; - if fields.len() != 2 { - return None; - } - Some([monomorphize::field_ty(ccx.tcx(), substs, &fields[0]), - monomorphize::field_ty(ccx.tcx(), substs, &fields[1])]) - } - ty::TyClosure(def_id, substs) => { - let mut tys = substs.upvar_tys(def_id, ccx.tcx()); - tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| { - if tys.next().is_some() { - None - } else { - Some([first_ty, second_ty]) - } - })) - } - ty::TyGenerator(def_id, substs, _) => { - let mut tys = substs.field_tys(def_id, ccx.tcx()); - tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| { - if tys.next().is_some() { - None - } else { - Some([first_ty, second_ty]) - } - })) - } - ty::TyTuple(tys, _) => { - if tys.len() != 2 { - return None; - } - Some([tys[0], tys[1]]) - } - _ => None - } -} - -/// Returns true if the type is represented as a pair of immediates. -pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) - -> bool { - match *ccx.layout_of(ty) { - Layout::FatPointer { .. } => true, - Layout::Univariant { ref variant, .. } => { - // There must be only 2 fields. - if variant.offsets.len() != 2 { - return false; - } - - match type_pair_fields(ccx, ty) { - Some([a, b]) => { - type_is_immediate(ccx, a) && type_is_immediate(ccx, b) - } - None => false - } - } - _ => false - } -} - -/// Identify types which have size zero at runtime. -pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { - let layout = ccx.layout_of(ty); - !layout.is_unsized() && layout.size(ccx).bytes() == 0 -} - pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All)) } @@ -245,17 +146,13 @@ pub fn C_uint(t: Type, i: u64) -> ValueRef { } } -pub fn C_big_integral(t: Type, u: u128) -> ValueRef { +pub fn C_uint_big(t: Type, u: u128) -> ValueRef { unsafe { - let words = [u as u64, u.wrapping_shr(64) as u64]; + let words = [u as u64, (u >> 64) as u64]; llvm::LLVMConstIntOfArbitraryPrecision(t.to_ref(), 2, words.as_ptr()) } } -pub fn C_nil(ccx: &CrateContext) -> ValueRef { - C_struct(ccx, &[], false) -} - pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef { C_uint(Type::i1(ccx), val as u64) } @@ -273,8 +170,7 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef { } pub fn C_usize(ccx: &CrateContext, i: u64) -> ValueRef { - let bit_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); - + let bit_size = ccx.data_layout().pointer_size.bits(); if bit_size < 64 { // make sure it doesn't overflow assert!(i < (1< Va // you will be kicked off fast isel. See issue #4352 for an example of this. 
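On the `C_uint_big` hunk above: the 128-bit constant is handed to LLVM as two 64-bit words, low word first, which is exactly what `[u as u64, (u >> 64) as u64]` produces. A standalone check of that split; the `split_u128` helper exists only for illustration.

fn split_u128(u: u128) -> [u64; 2] {
    // Same word order as the array passed to LLVMConstIntOfArbitraryPrecision
    // above: index 0 holds the low 64 bits, index 1 the high 64 bits.
    [u as u64, (u >> 64) as u64]
}

fn main() {
    let v: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
    assert_eq!(split_u128(v), [0x0011_2233_4455_6677, 0x0123_4567_89ab_cdef]);
    assert_eq!(split_u128(1u128 << 64), [0, 1]);
}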
pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { let len = s.len(); - let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx)); - C_named_struct(cx.str_slice_type(), &[cs, C_usize(cx, len as u64)]) + let cs = consts::ptrcast(C_cstr(cx, s, false), + cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to()); + C_fat_ptr(cx, cs, C_usize(cx, len as u64)) +} + +pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + C_struct(cx, &[ptr, meta], false) } pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef { @@ -333,12 +236,6 @@ pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> } } -pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef { - unsafe { - llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint) - } -} - pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef { unsafe { return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint); @@ -362,13 +259,14 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef { } } -pub fn const_get_elt(v: ValueRef, us: &[c_uint]) - -> ValueRef { +pub fn const_get_elt(v: ValueRef, idx: u64) -> ValueRef { unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - debug!("const_get_elt(v={:?}, us={:?}, r={:?})", - Value(v), us, Value(r)); + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + Value(v), idx, Value(r)); r } @@ -408,19 +306,6 @@ pub fn const_to_opt_u128(v: ValueRef, sign_ext: bool) -> Option { } } -pub fn is_undef(val: ValueRef) -> bool { - unsafe { - llvm::LLVMIsUndef(val) != False - } -} - -#[allow(dead_code)] // potentially useful -pub fn is_null(val: ValueRef) -> bool { - unsafe { - llvm::LLVMIsNull(val) != False - } -} - pub fn langcall(tcx: TyCtxt, span: Option, msg: &str, diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index 4ae289cfada00a4b6746c0edb07211d7c5c61b53..cfca3b57cb9d7a7a875e71af0d7d5fcbfa018f2a 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -14,19 +14,19 @@ use rustc::hir::def_id::DefId; use rustc::hir::map as hir_map; use rustc::middle::const_val::ConstEvalErr; -use {debuginfo, machine}; +use debuginfo; use base; use trans_item::{TransItem, TransItemExt}; use common::{self, CrateContext, val_ty}; use declare; use monomorphize::Instance; use type_::Type; -use type_of; +use type_of::LayoutLlvmExt; use rustc::ty; +use rustc::ty::layout::{Align, LayoutOf}; use rustc::hir; -use std::cmp; use std::ffi::{CStr, CString}; use syntax::ast; use syntax::attr; @@ -45,26 +45,26 @@ pub fn bitcast(val: ValueRef, ty: Type) -> ValueRef { fn set_global_alignment(ccx: &CrateContext, gv: ValueRef, - mut align: machine::llalign) { + mut align: Align) { // The target may require greater alignment for globals than the type does. // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, // which can force it to be smaller. Rust doesn't support this yet. 
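The new `C_fat_ptr` above builds unsized-pointer constants as a plain two-field struct, data pointer first and metadata second (per the `FAT_PTR_ADDR`/`FAT_PTR_EXTRA` asserts), rather than through the dedicated named `str_slice` type. At the source level a `&str` has the same shape, which a tiny example makes concrete.

fn main() {
    // A &str is a fat pointer: (data pointer, length). These are the two
    // values C_str_slice now assembles with C_fat_ptr.
    let s: &str = "hello";
    let (ptr, len) = (s.as_ptr(), s.len());
    println!("data_ptr = {:p}, length = {}", ptr, len);
    assert_eq!(len, 5);
}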
if let Some(min) = ccx.sess().target.target.options.min_global_align { match ty::layout::Align::from_bits(min, min) { - Ok(min) => align = cmp::max(align, min.abi() as machine::llalign), + Ok(min) => align = align.max(min), Err(err) => { ccx.sess().err(&format!("invalid minimum global alignment: {}", err)); } } } unsafe { - llvm::LLVMSetAlignment(gv, align); + llvm::LLVMSetAlignment(gv, align.abi() as u32); } } pub fn addr_of_mut(ccx: &CrateContext, cv: ValueRef, - align: machine::llalign, + align: Align, kind: &str) -> ValueRef { unsafe { @@ -82,15 +82,16 @@ pub fn addr_of_mut(ccx: &CrateContext, pub fn addr_of(ccx: &CrateContext, cv: ValueRef, - align: machine::llalign, + align: Align, kind: &str) -> ValueRef { if let Some(&gv) = ccx.const_globals().borrow().get(&cv) { unsafe { // Upgrade the alignment in cases where the same constant is used with different // alignment requirements - if align > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, align); + let llalign = align.abi() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); } } return gv; @@ -112,7 +113,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { let ty = common::instance_ty(ccx.tcx(), &instance); let g = if let Some(id) = ccx.tcx().hir.as_local_node_id(def_id) { - let llty = type_of::type_of(ccx, ty); + let llty = ccx.layout_of(ty).llvm_type(ccx); let (g, attrs) = match ccx.tcx().hir.get(id) { hir_map::NodeItem(&hir::Item { ref attrs, span, node: hir::ItemStatic(..), .. @@ -157,7 +158,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { } }; let llty2 = match ty.sty { - ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty), + ty::TyRawPtr(ref mt) => ccx.layout_of(mt.ty).llvm_type(ccx), _ => { ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`"); } @@ -206,7 +207,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef { // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? // FIXME(nagisa): investigate whether it can be changed into define_global - let g = declare::declare_global(ccx, &sym, type_of::type_of(ccx, ty)); + let g = declare::declare_global(ccx, &sym, ccx.layout_of(ty).llvm_type(ccx)); // Thread-local statics in some other crate need to *always* be linked // against in a thread-local fashion, so we need to be sure to apply the // thread-local attribute locally if it was present remotely. 
If we @@ -266,7 +267,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let instance = Instance::mono(ccx.tcx(), def_id); let ty = common::instance_ty(ccx.tcx(), &instance); - let llty = type_of::type_of(ccx, ty); + let llty = ccx.layout_of(ty).llvm_type(ccx); let g = if val_llty == llty { g } else { diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index cb71ef104d3d9424fc0310c63c3da083f5636d1b..b2bb605d01b462cee3d90c05e89a353a9d7af540 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -24,12 +24,14 @@ use partitioning::CodegenUnit; use type_::Type; +use type_of::PointeeInfo; + use rustc_data_structures::base_n; use rustc::middle::trans::Stats; use rustc_data_structures::stable_hasher::StableHashingContextProvider; use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; -use rustc::ty::layout::{LayoutCx, LayoutError, LayoutTyper, TyLayout}; +use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_trans_utils; @@ -99,10 +101,10 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details used_statics: RefCell>, - lltypes: RefCell, Type>>, + lltypes: RefCell, Option), Type>>, + scalar_lltypes: RefCell, Type>>, + pointee_infos: RefCell, Size), Option>>, isize_ty: Type, - opaque_vec_type: Type, - str_slice_type: Type, dbg_cx: Option>, @@ -377,9 +379,9 @@ pub fn new(shared: &SharedCrateContext<'a, 'tcx>, statics_to_rauw: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()), lltypes: RefCell::new(FxHashMap()), + scalar_lltypes: RefCell::new(FxHashMap()), + pointee_infos: RefCell::new(FxHashMap()), isize_ty: Type::from_ref(ptr::null_mut()), - opaque_vec_type: Type::from_ref(ptr::null_mut()), - str_slice_type: Type::from_ref(ptr::null_mut()), dbg_cx, eh_personality: Cell::new(None), eh_unwind_resume: Cell::new(None), @@ -389,25 +391,19 @@ pub fn new(shared: &SharedCrateContext<'a, 'tcx>, placeholder: PhantomData, }; - let (isize_ty, opaque_vec_type, str_slice_ty, mut local_ccx) = { + let (isize_ty, mut local_ccx) = { // Do a little dance to create a dummy CrateContext, so we can // create some things in the LLVM module of this codegen unit let mut local_ccxs = vec![local_ccx]; - let (isize_ty, opaque_vec_type, str_slice_ty) = { + let isize_ty = { let dummy_ccx = LocalCrateContext::dummy_ccx(shared, local_ccxs.as_mut_slice()); - let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); - str_slice_ty.set_struct_body(&[Type::i8p(&dummy_ccx), - Type::isize(&dummy_ccx)], - false); - (Type::isize(&dummy_ccx), Type::opaque_vec(&dummy_ccx), str_slice_ty) + Type::isize(&dummy_ccx) }; - (isize_ty, opaque_vec_type, str_slice_ty, local_ccxs.pop().unwrap()) + (isize_ty, local_ccxs.pop().unwrap()) }; local_ccx.isize_ty = isize_ty; - local_ccx.opaque_vec_type = opaque_vec_type; - local_ccx.str_slice_type = str_slice_ty; local_ccx } @@ -512,10 +508,19 @@ pub fn used_statics<'a>(&'a self) -> &'a RefCell> { &self.local().used_statics } - pub fn lltypes<'a>(&'a self) -> &'a RefCell, Type>> { + pub fn lltypes<'a>(&'a self) -> &'a RefCell, Option), Type>> { &self.local().lltypes } + pub fn scalar_lltypes<'a>(&'a self) -> &'a RefCell, Type>> { + &self.local().scalar_lltypes + } + + pub fn pointee_infos<'a>(&'a self) + -> &'a RefCell, Size), Option>> { + &self.local().pointee_infos + } + pub fn stats<'a>(&'a self) -> &'a RefCell { 
&self.local().stats } @@ -524,10 +529,6 @@ pub fn isize_ty(&self) -> Type { self.local().isize_ty } - pub fn str_slice_type(&self) -> Type { - self.local().str_slice_type - } - pub fn dbg_cx<'a>(&'a self) -> &'a Option> { &self.local().dbg_cx } @@ -647,48 +648,44 @@ fn data_layout(&self) -> &ty::layout::TargetDataLayout { } } +impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a SharedCrateContext<'a, 'tcx> { + fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { + self.tcx + } +} + impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CrateContext<'a, 'tcx> { fn data_layout(&self) -> &ty::layout::TargetDataLayout { &self.shared.tcx.data_layout } } -impl<'a, 'tcx> LayoutTyper<'tcx> for &'a SharedCrateContext<'a, 'tcx> { - type TyLayout = TyLayout<'tcx>; - +impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { - self.tcx + self.shared.tcx } +} + +impl<'a, 'tcx> LayoutOf> for &'a SharedCrateContext<'a, 'tcx> { + type TyLayout = TyLayout<'tcx>; fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { - let param_env = ty::ParamEnv::empty(traits::Reveal::All); - LayoutCx::new(self.tcx, param_env) + (self.tcx, ty::ParamEnv::empty(traits::Reveal::All)) .layout_of(ty) .unwrap_or_else(|e| match e { LayoutError::SizeOverflow(_) => self.sess().fatal(&e.to_string()), _ => bug!("failed to get layout for `{}`: {}", ty, e) }) } - - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.tcx().fully_normalize_associated_types_in(&ty) - } } -impl<'a, 'tcx> LayoutTyper<'tcx> for &'a CrateContext<'a, 'tcx> { +impl<'a, 'tcx> LayoutOf> for &'a CrateContext<'a, 'tcx> { type TyLayout = TyLayout<'tcx>; - fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { - self.shared.tcx - } fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout { self.shared.layout_of(ty) } - - fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> { - self.shared.normalize_projections(ty) - } } /// Declare any llvm intrinsics that you might need diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index a68390eab7fd28415ab16dcfd605bec73e046bed..b2ad538a8ab290f9f1e84bede3f02ccf99e18e11 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -9,11 +9,10 @@ // except according to those terms. 
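The `LayoutOf` impls above replace the old `LayoutTyper` trait: `SharedCrateContext` answers layout queries by delegating to a `(tcx, param_env)` pair, and `CrateContext` delegates to the shared context, so callers uniformly write `ccx.layout_of(ty)`. A simplified standalone mock of the pattern follows; the `Mock*` types are invented for illustration and the real trait lives in `rustc::ty::layout`.

trait LayoutOf<Ty> {
    type TyLayout;
    fn layout_of(self, ty: Ty) -> Self::TyLayout;
}

#[derive(Copy, Clone, Debug, PartialEq)]
struct MockLayout { size: u64, align: u64 }

struct MockCtx;

impl<'a> LayoutOf<&'a str> for &'a MockCtx {
    type TyLayout = MockLayout;
    fn layout_of(self, ty: &'a str) -> MockLayout {
        // Hypothetical answers, just to make the example concrete.
        match ty {
            "u8" => MockLayout { size: 1, align: 1 },
            "u64" | "usize" => MockLayout { size: 8, align: 8 },
            _ => MockLayout { size: 0, align: 1 },
        }
    }
}

fn main() {
    let ccx = MockCtx;
    assert_eq!((&ccx).layout_of("u64"), MockLayout { size: 8, align: 8 });
}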
use self::RecursiveTypeDescription::*; -use self::MemberOffset::*; use self::MemberDescriptionFactory::*; use self::EnumDiscriminantInfo::*; -use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of, +use super::utils::{debug_context, DIB, span_start, get_namespace_for_item, create_DIArray, is_node_local_to_unit}; use super::namespace::mangled_name_of_item; use super::type_names::compute_debuginfo_type_name; @@ -30,19 +29,17 @@ use rustc::ty::fold::TypeVisitor; use rustc::ty::subst::Substs; use rustc::ty::util::TypeIdHasher; -use rustc::hir; use rustc::ich::Fingerprint; -use {type_of, machine, monomorphize}; use common::{self, CrateContext}; -use type_::Type; use rustc::ty::{self, AdtKind, Ty}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; use rustc::session::{Session, config}; use rustc::util::nodemap::FxHashMap; use rustc::util::common::path2cstr; use libc::{c_uint, c_longlong}; use std::ffi::CString; +use std::fmt::Write; use std::ptr; use std::path::Path; use syntax::ast; @@ -184,7 +181,6 @@ enum RecursiveTypeDescription<'tcx> { unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: DICompositeType, - llvm_type: Type, member_description_factory: MemberDescriptionFactory<'tcx>, }, FinalMetadata(DICompositeType) @@ -195,7 +191,6 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: DICompositeType, - llvm_type: Type, member_description_factory: MemberDescriptionFactory<'tcx>) -> RecursiveTypeDescription<'tcx> { @@ -208,7 +203,6 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( unfinished_type, unique_type_id, metadata_stub, - llvm_type, member_description_factory, } } @@ -224,9 +218,7 @@ fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult { unfinished_type, unique_type_id, metadata_stub, - llvm_type, ref member_description_factory, - .. } => { // Make sure that we have a forward declaration of the type in // the TypeMap so that recursive references are possible. This @@ -251,7 +243,6 @@ fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult { // ... and attach them to the stub to complete it. 
set_members_of_composite_type(cx, metadata_stub, - llvm_type, &member_descriptions[..]); return MetadataCreationResult::new(metadata_stub, true); } @@ -274,20 +265,21 @@ fn finalize<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> MetadataCreationResult { fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id: UniqueTypeId, + array_or_slice_type: Ty<'tcx>, element_type: Ty<'tcx>, - len: Option, span: Span) -> MetadataCreationResult { let element_type_metadata = type_metadata(cx, element_type, span); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let element_llvm_type = type_of::type_of(cx, element_type); - let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type); + let (size, align) = cx.size_and_align_of(array_or_slice_type); - let (array_size_in_bytes, upper_bound) = match len { - Some(len) => (element_type_size * len, len as c_longlong), - None => (0, -1) + let upper_bound = match array_or_slice_type.sty { + ty::TyArray(_, len) => { + len.val.to_const_int().unwrap().to_u64().unwrap() as c_longlong + } + _ => -1 }; let subrange = unsafe { @@ -298,8 +290,8 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let metadata = unsafe { llvm::LLVMRustDIBuilderCreateArrayType( DIB(cx), - bytes_to_bits(array_size_in_bytes), - bytes_to_bits(element_type_align), + size.bits(), + align.abi_bits() as u32, element_type_metadata, subscripts) }; @@ -308,66 +300,52 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - vec_type: Ty<'tcx>, + slice_ptr_type: Ty<'tcx>, element_type: Ty<'tcx>, unique_type_id: UniqueTypeId, span: Span) -> MetadataCreationResult { - let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { - ty: element_type, - mutbl: hir::MutImmutable - }); + let data_ptr_type = cx.tcx().mk_imm_ptr(element_type); - let element_type_metadata = type_metadata(cx, data_ptr_type, span); + let data_ptr_metadata = type_metadata(cx, data_ptr_type, span); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let slice_llvm_type = type_of::type_of(cx, vec_type); - let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true); + let slice_type_name = compute_debuginfo_type_name(cx, slice_ptr_type, true); + + let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type); + let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx().types.usize); - let member_llvm_types = slice_llvm_type.field_types(); - assert!(slice_layout_is_correct(cx, - &member_llvm_types[..], - element_type)); let member_descriptions = [ MemberDescription { name: "data_ptr".to_string(), - llvm_type: member_llvm_types[0], - type_metadata: element_type_metadata, - offset: ComputedMemberOffset, + type_metadata: data_ptr_metadata, + offset: Size::from_bytes(0), + size: pointer_size, + align: pointer_align, flags: DIFlags::FlagZero, }, MemberDescription { name: "length".to_string(), - llvm_type: member_llvm_types[1], type_metadata: type_metadata(cx, cx.tcx().types.usize, span), - offset: ComputedMemberOffset, + offset: pointer_size, + size: usize_size, + align: usize_align, flags: DIFlags::FlagZero, }, ]; - assert!(member_descriptions.len() == member_llvm_types.len()); - let file_metadata = unknown_file_metadata(cx); let metadata = composite_type_metadata(cx, - slice_llvm_type, + slice_ptr_type, &slice_type_name[..], unique_type_id, &member_descriptions, NO_SCOPE_METADATA, file_metadata, span); - return MetadataCreationResult::new(metadata, false); - - fn 
slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - member_llvm_types: &[Type], - element_type: Ty<'tcx>) - -> bool { - member_llvm_types.len() == 2 && - member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() && - member_llvm_types[1] == cx.isize_ty() - } + MetadataCreationResult::new(metadata, false) } fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, @@ -436,38 +414,38 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let trait_type_name = compute_debuginfo_type_name(cx, trait_object_type, false); - let trait_llvm_type = type_of::type_of(cx, trait_object_type); let file_metadata = unknown_file_metadata(cx); - - let ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { - ty: cx.tcx().types.u8, - mutbl: hir::MutImmutable - }); - let ptr_type_metadata = type_metadata(cx, ptr_type, syntax_pos::DUMMY_SP); - let llvm_type = type_of::type_of(cx, ptr_type); + let layout = cx.layout_of(cx.tcx().mk_mut_ptr(trait_type)); assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); + + let data_ptr_field = layout.field(cx, 0); + let vtable_field = layout.field(cx, 1); let member_descriptions = [ MemberDescription { name: "pointer".to_string(), - llvm_type: llvm_type, - type_metadata: ptr_type_metadata, - offset: ComputedMemberOffset, + type_metadata: type_metadata(cx, + cx.tcx().mk_mut_ptr(cx.tcx().types.u8), + syntax_pos::DUMMY_SP), + offset: layout.fields.offset(0), + size: data_ptr_field.size, + align: data_ptr_field.align, flags: DIFlags::FlagArtificial, }, MemberDescription { name: "vtable".to_string(), - llvm_type: llvm_type, - type_metadata: ptr_type_metadata, - offset: ComputedMemberOffset, + type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), + offset: layout.fields.offset(1), + size: vtable_field.size, + align: vtable_field.align, flags: DIFlags::FlagArtificial, }, ]; composite_type_metadata(cx, - trait_llvm_type, + trait_object_type, &trait_type_name[..], unique_type_id, &member_descriptions, @@ -556,15 +534,12 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyTuple(ref elements, _) if elements.is_empty() => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) } - ty::TyArray(typ, len) => { - let len = len.val.to_const_int().unwrap().to_u64().unwrap(); - fixed_vec_metadata(cx, unique_type_id, typ, Some(len), usage_site_span) - } + ty::TyArray(typ, _) | ty::TySlice(typ) => { - fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span) + fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span) } ty::TyStr => { - fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span) + fixed_vec_metadata(cx, unique_type_id, t, cx.tcx().types.i8, usage_site_span) } ty::TyDynamic(..) 
=> { MetadataCreationResult::new( @@ -770,15 +745,14 @@ fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, _ => bug!("debuginfo::basic_type_metadata - t is invalid type") }; - let llvm_type = type_of::type_of(cx, t); - let (size, align) = size_and_align_of(cx, llvm_type); + let (size, align) = cx.size_and_align_of(t); let name = CString::new(name).unwrap(); let ty_metadata = unsafe { llvm::LLVMRustDIBuilderCreateBasicType( DIB(cx), name.as_ptr(), - bytes_to_bits(size), - bytes_to_bits(align), + size.bits(), + align.abi_bits() as u32, encoding) }; @@ -790,29 +764,25 @@ fn foreign_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id: UniqueTypeId) -> DIType { debug!("foreign_type_metadata: {:?}", t); - let llvm_type = type_of::type_of(cx, t); - let name = compute_debuginfo_type_name(cx, t, false); - create_struct_stub(cx, llvm_type, &name, unique_type_id, NO_SCOPE_METADATA) + create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA) } fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, pointer_type: Ty<'tcx>, pointee_type_metadata: DIType) -> DIType { - let pointer_llvm_type = type_of::type_of(cx, pointer_type); - let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type); + let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type); let name = compute_debuginfo_type_name(cx, pointer_type, false); let name = CString::new(name).unwrap(); - let ptr_metadata = unsafe { + unsafe { llvm::LLVMRustDIBuilderCreatePointerType( DIB(cx), pointee_type_metadata, - bytes_to_bits(pointer_size), - bytes_to_bits(pointer_align), + pointer_size.bits(), + pointer_align.abi_bits() as u32, name.as_ptr()) - }; - return ptr_metadata; + } } pub fn compile_unit_metadata(scc: &SharedCrateContext, @@ -907,21 +877,15 @@ fn new(metadata: DIType, already_stored_in_typemap: bool) -> MetadataCreationRes } } -#[derive(Debug)] -enum MemberOffset { - FixedMemberOffset { bytes: usize }, - // For ComputedMemberOffset, the offset is read from the llvm type definition. - ComputedMemberOffset -} - // Description of a type member, which can either be a regular field (as in // structs or tuples) or an enum variant. #[derive(Debug)] struct MemberDescription { name: String, - llvm_type: Type, type_metadata: DIType, - offset: MemberOffset, + offset: Size, + size: Size, + align: Align, flags: DIFlags, } @@ -968,7 +932,6 @@ fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) struct StructMemberDescriptionFactory<'tcx> { ty: Ty<'tcx>, variant: &'tcx ty::VariantDef, - substs: &'tcx Substs<'tcx>, span: Span, } @@ -976,35 +939,20 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let layout = cx.layout_of(self.ty); - - let tmp; - let offsets = match *layout { - layout::Univariant { ref variant, .. } => &variant.offsets, - layout::Vector { element, count } => { - let element_size = element.size(cx).bytes(); - tmp = (0..count). 
- map(|i| layout::Size::from_bytes(i*element_size)) - .collect::>(); - &tmp - } - _ => bug!("{} is not a struct", self.ty) - }; - self.variant.fields.iter().enumerate().map(|(i, f)| { let name = if self.variant.ctor_kind == CtorKind::Fn { format!("__{}", i) } else { f.name.to_string() }; - let fty = monomorphize::field_ty(cx.tcx(), self.substs, f); - - let offset = FixedMemberOffset { bytes: offsets[i].bytes() as usize}; - + let field = layout.field(cx, i); + let (size, align) = field.size_and_align(); MemberDescription { name, - llvm_type: type_of::in_memory_type_of(cx, fty), - type_metadata: type_metadata(cx, fty, self.span), - offset, + type_metadata: type_metadata(cx, field.ty, self.span), + offset: layout.fields.offset(i), + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1018,17 +966,16 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let struct_name = compute_debuginfo_type_name(cx, struct_type, false); - let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type); - let (struct_def_id, variant, substs) = match struct_type.sty { - ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), + let (struct_def_id, variant) = match struct_type.sty { + ty::TyAdt(def, _) => (def.did, def.struct_variant()), _ => bug!("prepare_struct_metadata on a non-ADT") }; let containing_scope = get_namespace_for_item(cx, struct_def_id); let struct_metadata_stub = create_struct_stub(cx, - struct_llvm_type, + struct_type, &struct_name, unique_type_id, containing_scope); @@ -1038,11 +985,9 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, struct_type, unique_type_id, struct_metadata_stub, - struct_llvm_type, StructMDF(StructMemberDescriptionFactory { ty: struct_type, variant, - substs, span, }) ) @@ -1063,21 +1008,14 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let layout = cx.layout_of(self.ty); - let offsets = if let layout::Univariant { ref variant, .. 
} = *layout { - &variant.offsets - } else { - bug!("{} is not a tuple", self.ty); - }; - - self.component_types - .iter() - .enumerate() - .map(|(i, &component_type)| { + self.component_types.iter().enumerate().map(|(i, &component_type)| { + let (size, align) = cx.size_and_align_of(component_type); MemberDescription { name: format!("__{}", i), - llvm_type: type_of::type_of(cx, component_type), type_metadata: type_metadata(cx, component_type, self.span), - offset: FixedMemberOffset { bytes: offsets[i].bytes() as usize }, + offset: layout.fields.offset(i), + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1091,18 +1029,16 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false); - let tuple_llvm_type = type_of::type_of(cx, tuple_type); create_and_register_recursive_type_forward_declaration( cx, tuple_type, unique_type_id, create_struct_stub(cx, - tuple_llvm_type, + tuple_type, &tuple_name[..], unique_type_id, NO_SCOPE_METADATA), - tuple_llvm_type, TupleMDF(TupleMemberDescriptionFactory { ty: tuple_type, component_types: component_types.to_vec(), @@ -1116,21 +1052,23 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, //=----------------------------------------------------------------------------- struct UnionMemberDescriptionFactory<'tcx> { + layout: TyLayout<'tcx>, variant: &'tcx ty::VariantDef, - substs: &'tcx Substs<'tcx>, span: Span, } impl<'tcx> UnionMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { - self.variant.fields.iter().map(|field| { - let fty = monomorphize::field_ty(cx.tcx(), self.substs, field); + self.variant.fields.iter().enumerate().map(|(i, f)| { + let field = self.layout.field(cx, i); + let (size, align) = field.size_and_align(); MemberDescription { - name: field.name.to_string(), - llvm_type: type_of::type_of(cx, fty), - type_metadata: type_metadata(cx, fty, self.span), - offset: FixedMemberOffset { bytes: 0 }, + name: f.name.to_string(), + type_metadata: type_metadata(cx, field.ty, self.span), + offset: Size::from_bytes(0), + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1143,17 +1081,16 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let union_name = compute_debuginfo_type_name(cx, union_type, false); - let union_llvm_type = type_of::in_memory_type_of(cx, union_type); - let (union_def_id, variant, substs) = match union_type.sty { - ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), + let (union_def_id, variant) = match union_type.sty { + ty::TyAdt(def, _) => (def.did, def.struct_variant()), _ => bug!("prepare_union_metadata on a non-ADT") }; let containing_scope = get_namespace_for_item(cx, union_def_id); let union_metadata_stub = create_union_stub(cx, - union_llvm_type, + union_type, &union_name, unique_type_id, containing_scope); @@ -1163,10 +1100,9 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, union_type, unique_type_id, union_metadata_stub, - union_llvm_type, UnionMDF(UnionMemberDescriptionFactory { + layout: cx.layout_of(union_type), variant, - substs, span, }) ) @@ -1183,10 +1119,9 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // offset of zero bytes). 
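With the per-variant offset vectors gone, each member description above now records its own offset, size and alignment straight from the layout query (`layout.fields.offset(i)`, `cx.size_and_align_of`). The source-level counterparts of that size/align pair are `std::mem::size_of` and `align_of`, as in this small check.

use std::mem::{align_of, size_of};

fn main() {
    // The kind of (size, align) answers the member descriptions now carry,
    // expressed with the standard library equivalents.
    assert_eq!((size_of::<u32>(), align_of::<u32>()), (4, 4));
    assert_eq!((size_of::<(u8, u32)>(), align_of::<(u8, u32)>()), (8, 4));
}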
struct EnumMemberDescriptionFactory<'tcx> { enum_type: Ty<'tcx>, - type_rep: &'tcx layout::Layout, + layout: TyLayout<'tcx>, discriminant_type_metadata: Option, containing_scope: DIScope, - file_metadata: DIFile, span: Span, } @@ -1194,162 +1129,70 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { let adt = &self.enum_type.ty_adt_def().unwrap(); - let substs = match self.enum_type.sty { - ty::TyAdt(def, ref s) if def.adt_kind() == AdtKind::Enum => s, - _ => bug!("{} is not an enum", self.enum_type) - }; - match *self.type_rep { - layout::General { ref variants, .. } => { + match self.layout.variants { + layout::Variants::Single { .. } if adt.variants.is_empty() => vec![], + layout::Variants::Single { index } => { + let (variant_type_metadata, member_description_factory) = + describe_enum_variant(cx, + self.layout, + &adt.variants[index], + NoDiscriminant, + self.containing_scope, + self.span); + + let member_descriptions = + member_description_factory.create_member_descriptions(cx); + + set_members_of_composite_type(cx, + variant_type_metadata, + &member_descriptions[..]); + vec![ + MemberDescription { + name: "".to_string(), + type_metadata: variant_type_metadata, + offset: Size::from_bytes(0), + size: self.layout.size, + align: self.layout.align, + flags: DIFlags::FlagZero + } + ] + } + layout::Variants::Tagged { ref variants, .. } => { let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata .expect("")); - variants - .iter() - .enumerate() - .map(|(i, struct_def)| { - let (variant_type_metadata, - variant_llvm_type, - member_desc_factory) = - describe_enum_variant(cx, - self.enum_type, - struct_def, - &adt.variants[i], - discriminant_info, - self.containing_scope, - self.span); - - let member_descriptions = member_desc_factory - .create_member_descriptions(cx); - - set_members_of_composite_type(cx, - variant_type_metadata, - variant_llvm_type, - &member_descriptions); - MemberDescription { - name: "".to_string(), - llvm_type: variant_llvm_type, - type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: DIFlags::FlagZero - } - }).collect() - }, - layout::Univariant{ ref variant, .. } => { - assert!(adt.variants.len() <= 1); - - if adt.variants.is_empty() { - vec![] - } else { - let (variant_type_metadata, - variant_llvm_type, - member_description_factory) = + (0..variants.len()).map(|i| { + let variant = self.layout.for_variant(cx, i); + let (variant_type_metadata, member_desc_factory) = describe_enum_variant(cx, - self.enum_type, variant, - &adt.variants[0], - NoDiscriminant, + &adt.variants[i], + discriminant_info, self.containing_scope, self.span); - let member_descriptions = - member_description_factory.create_member_descriptions(cx); + let member_descriptions = member_desc_factory + .create_member_descriptions(cx); set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, - &member_descriptions[..]); - vec![ - MemberDescription { - name: "".to_string(), - llvm_type: variant_llvm_type, - type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: DIFlags::FlagZero - } - ] - } - } - layout::RawNullablePointer { nndiscr: non_null_variant_index, .. } => { - // As far as debuginfo is concerned, the pointer this enum - // represents is still wrapped in a struct. This is to make the - // DWARF representation of enums uniform. 
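The old `RawNullablePointer`/`StructWrappedNullablePointer` layouts being removed here correspond to the new `Variants::NicheFilling` case: one variant's forbidden bit patterns (for example the null pointer) encode the dataless variants, so no separate discriminant is stored. That effect is visible from safe code, as in this standalone check.

use std::mem::size_of;

fn main() {
    // Niche filling: None reuses the null pointer value, so Option<&u8>
    // is no bigger than &u8.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());

    // Without a usable niche the enum needs an explicit tag
    // (the Variants::Tagged case).
    #[allow(dead_code)]
    enum Tagged { A(u64), B(u64) }
    assert!(size_of::<Tagged>() > size_of::<u64>());
}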
- - // First create a description of the artificial wrapper struct: - let non_null_variant = &adt.variants[non_null_variant_index as usize]; - let non_null_variant_name = non_null_variant.name.as_str(); - - // The llvm type and metadata of the pointer - let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0] ); - let non_null_llvm_type = type_of::type_of(cx, nnty); - let non_null_type_metadata = type_metadata(cx, nnty, self.span); - - // The type of the artificial struct wrapping the pointer - let artificial_struct_llvm_type = Type::struct_(cx, - &[non_null_llvm_type], - false); - - // For the metadata of the wrapper struct, we need to create a - // MemberDescription of the struct's single field. - let sole_struct_member_description = MemberDescription { - name: match non_null_variant.ctor_kind { - CtorKind::Fn => "__0".to_string(), - CtorKind::Fictive => { - non_null_variant.fields[0].name.to_string() - } - CtorKind::Const => bug!() - }, - llvm_type: non_null_llvm_type, - type_metadata: non_null_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, - flags: DIFlags::FlagZero - }; - - let unique_type_id = debug_context(cx).type_map - .borrow_mut() - .get_unique_type_id_of_enum_variant( - cx, - self.enum_type, - &non_null_variant_name); - - // Now we can create the metadata of the artificial struct - let artificial_struct_metadata = - composite_type_metadata(cx, - artificial_struct_llvm_type, - &non_null_variant_name, - unique_type_id, - &[sole_struct_member_description], - self.containing_scope, - self.file_metadata, - syntax_pos::DUMMY_SP); - - // Encode the information about the null variant in the union - // member's name. - let null_variant_index = (1 - non_null_variant_index) as usize; - let null_variant_name = adt.variants[null_variant_index].name; - let union_member_name = format!("RUST$ENCODED$ENUM${}${}", - 0, - null_variant_name); - - // Finally create the (singleton) list of descriptions of union - // members. - vec![ + &member_descriptions); MemberDescription { - name: union_member_name, - llvm_type: artificial_struct_llvm_type, - type_metadata: artificial_struct_metadata, - offset: FixedMemberOffset { bytes: 0 }, + name: "".to_string(), + type_metadata: variant_type_metadata, + offset: Size::from_bytes(0), + size: variant.size, + align: variant.align, flags: DIFlags::FlagZero } - ] - }, - layout::StructWrappedNullablePointer { nonnull: ref struct_def, - nndiscr, - ref discrfield_source, ..} => { + }).collect() + } + layout::Variants::NicheFilling { dataful_variant, ref niche_variants, .. } => { + let variant = self.layout.for_variant(cx, dataful_variant); // Create a description of the non-null variant - let (variant_type_metadata, variant_llvm_type, member_description_factory) = + let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, - self.enum_type, - struct_def, - &adt.variants[nndiscr as usize], + variant, + &adt.variants[dataful_variant], OptimizedDiscriminant, self.containing_scope, self.span); @@ -1359,34 +1202,51 @@ fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, &variant_member_descriptions[..]); // Encode the information about the null variant in the union // member's name. 
- let null_variant_index = (1 - nndiscr) as usize; - let null_variant_name = adt.variants[null_variant_index].name; - let discrfield_source = discrfield_source.iter() - .skip(1) - .map(|x| x.to_string()) - .collect::>().join("$"); - let union_member_name = format!("RUST$ENCODED$ENUM${}${}", - discrfield_source, - null_variant_name); + let mut name = String::from("RUST$ENCODED$ENUM$"); + // HACK(eddyb) the debuggers should just handle offset+size + // of discriminant instead of us having to recover its path. + // Right now it's not even going to work for `niche_start > 0`, + // and for multiple niche variants it only supports the first. + fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + name: &mut String, + layout: TyLayout<'tcx>, + offset: Size, + size: Size) { + for i in 0..layout.fields.count() { + let field_offset = layout.fields.offset(i); + if field_offset > offset { + continue; + } + let inner_offset = offset - field_offset; + let field = layout.field(ccx, i); + if inner_offset + size <= field.size { + write!(name, "{}$", i).unwrap(); + compute_field_path(ccx, name, field, inner_offset, size); + } + } + } + compute_field_path(cx, &mut name, + self.layout, + self.layout.fields.offset(0), + self.layout.field(cx, 0).size); + name.push_str(&adt.variants[niche_variants.start].name.as_str()); // Create the (singleton) list of descriptions of union members. vec![ MemberDescription { - name: union_member_name, - llvm_type: variant_llvm_type, + name, type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size: variant.size, + align: variant.align, flags: DIFlags::FlagZero } ] - }, - layout::CEnum { .. } => span_bug!(self.span, "This should be unreachable."), - ref l @ _ => bug!("Not an enum layout: {:#?}", l) + } } } } @@ -1394,7 +1254,7 @@ fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) // Creates MemberDescriptions for the fields of a single enum variant. struct VariantMemberDescriptionFactory<'tcx> { // Cloned from the layout::Struct describing the variant. - offsets: &'tcx [layout::Size], + offsets: Vec, args: Vec<(String, Ty<'tcx>)>, discriminant_type_metadata: Option, span: Span, @@ -1404,14 +1264,16 @@ impl<'tcx> VariantMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec { self.args.iter().enumerate().map(|(i, &(ref name, ty))| { + let (size, align) = cx.size_and_align_of(ty); MemberDescription { name: name.to_string(), - llvm_type: type_of::type_of(cx, ty), type_metadata: match self.discriminant_type_metadata { Some(metadata) if i == 0 => metadata, _ => type_metadata(cx, ty, self.span) }, - offset: FixedMemberOffset { bytes: self.offsets[i].bytes() as usize }, + offset: self.offsets[i], + size, + align, flags: DIFlags::FlagZero } }).collect() @@ -1430,92 +1292,52 @@ enum EnumDiscriminantInfo { // descriptions of the fields of the variant. This is a rudimentary version of a // full RecursiveTypeDescription. 
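On the `RUST$ENCODED$ENUM$` naming in the NicheFilling branch above: the debugger learns about the dataless variant from the union member's name, which is the fixed prefix, a `$`-separated path of field indices leading to the discriminant/niche, and the variant's name; the HACK comment above notes the scheme's current limits. A standalone sketch of how such a name is assembled; the `encoded_enum_name` helper and the example path are illustrative only, since the real path depends on the computed layout.

fn encoded_enum_name(field_path: &[usize], dataless_variant: &str) -> String {
    // Prefix, then each field index followed by '$', then the variant name.
    let mut name = String::from("RUST$ENCODED$ENUM$");
    for idx in field_path {
        name.push_str(&format!("{}$", idx));
    }
    name.push_str(dataless_variant);
    name
}

fn main() {
    assert_eq!(encoded_enum_name(&[0], "None"), "RUST$ENCODED$ENUM$0$None");
}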
fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - enum_type: Ty<'tcx>, - struct_def: &'tcx layout::Struct, + layout: layout::TyLayout<'tcx>, variant: &'tcx ty::VariantDef, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, span: Span) - -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) { - let substs = match enum_type.sty { - ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s, - ref t @ _ => bug!("{:#?} is not an enum", t) - }; - - let maybe_discr_and_signed: Option<(layout::Integer, bool)> = match *cx.layout_of(enum_type) { - layout::CEnum {discr, ..} => Some((discr, true)), - layout::General{discr, ..} => Some((discr, false)), - layout::Univariant { .. } - | layout::RawNullablePointer { .. } - | layout::StructWrappedNullablePointer { .. } => None, - ref l @ _ => bug!("This should be unreachable. Type is {:#?} layout is {:#?}", enum_type, l) - }; - - let mut field_tys = variant.fields.iter().map(|f| { - monomorphize::field_ty(cx.tcx(), &substs, f) - }).collect::>(); - - if let Some((discr, signed)) = maybe_discr_and_signed { - field_tys.insert(0, discr.to_ty(&cx.tcx(), signed)); - } - - - let variant_llvm_type = - Type::struct_(cx, &field_tys - .iter() - .map(|t| type_of::type_of(cx, t)) - .collect::>() - , - struct_def.packed); - // Could do some consistency checks here: size, align, field count, discr type - + -> (DICompositeType, MemberDescriptionFactory<'tcx>) { let variant_name = variant.name.as_str(); let unique_type_id = debug_context(cx).type_map .borrow_mut() .get_unique_type_id_of_enum_variant( cx, - enum_type, + layout.ty, &variant_name); let metadata_stub = create_struct_stub(cx, - variant_llvm_type, + layout.ty, &variant_name, unique_type_id, containing_scope); - // Get the argument names from the enum variant info - let mut arg_names: Vec<_> = match variant.ctor_kind { - CtorKind::Const => vec![], - CtorKind::Fn => { - variant.fields - .iter() - .enumerate() - .map(|(i, _)| format!("__{}", i)) - .collect() - } - CtorKind::Fictive => { - variant.fields - .iter() - .map(|f| f.name.to_string()) - .collect() - } - }; - // If this is not a univariant enum, there is also the discriminant field. - match discriminant_info { - RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()), - _ => { /* do nothing */ } + let (discr_offset, discr_arg) = match discriminant_info { + RegularDiscriminant(_) => { + let enum_layout = cx.layout_of(layout.ty); + (Some(enum_layout.fields.offset(0)), + Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty))) + } + _ => (None, None), }; + let offsets = discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| { + layout.fields.offset(i) + })).collect(); // Build an array of (field name, field type) pairs to be captured in the factory closure. 
- let args: Vec<(String, Ty)> = arg_names.iter() - .zip(field_tys.iter()) - .map(|(s, &t)| (s.to_string(), t)) - .collect(); + let args = discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| { + let name = if variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + variant.fields[i].name.to_string() + }; + (name, layout.field(cx, i).ty) + })).collect(); let member_description_factory = VariantMDF(VariantMemberDescriptionFactory { - offsets: &struct_def.offsets[..], + offsets, args, discriminant_type_metadata: match discriminant_info { RegularDiscriminant(discriminant_type_metadata) => { @@ -1526,7 +1348,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span, }); - (metadata_stub, variant_llvm_type, member_description_factory) + (metadata_stub, member_description_factory) } fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, @@ -1562,21 +1384,18 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }) .collect(); - let discriminant_type_metadata = |inttype: layout::Integer, signed: bool| { - let disr_type_key = (enum_def_id, inttype); + let discriminant_type_metadata = |discr: layout::Primitive| { + let disr_type_key = (enum_def_id, discr); let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types .borrow() .get(&disr_type_key).cloned(); match cached_discriminant_type_metadata { Some(discriminant_type_metadata) => discriminant_type_metadata, None => { - let discriminant_llvm_type = Type::from_integer(cx, inttype); let (discriminant_size, discriminant_align) = - size_and_align_of(cx, discriminant_llvm_type); + (discr.size(cx), discr.align(cx)); let discriminant_base_type_metadata = - type_metadata(cx, - inttype.to_ty(&cx.tcx(), signed), - syntax_pos::DUMMY_SP); + type_metadata(cx, discr.to_ty(cx.tcx()), syntax_pos::DUMMY_SP); let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); let name = CString::new(discriminant_name.as_bytes()).unwrap(); @@ -1587,8 +1406,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - bytes_to_bits(discriminant_size), - bytes_to_bits(discriminant_align), + discriminant_size.bits(), + discriminant_align.abi_bits() as u32, create_DIArray(DIB(cx), &enumerators_metadata), discriminant_base_type_metadata) }; @@ -1602,21 +1421,22 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } }; - let type_rep = cx.layout_of(enum_type); + let layout = cx.layout_of(enum_type); - let discriminant_type_metadata = match *type_rep { - layout::CEnum { discr, signed, .. } => { - return FinalMetadata(discriminant_type_metadata(discr, signed)) - }, - layout::RawNullablePointer { .. } | - layout::StructWrappedNullablePointer { .. } | - layout::Univariant { .. } => None, - layout::General { discr, .. } => Some(discriminant_type_metadata(discr, false)), - ref l @ _ => bug!("Not an enum layout: {:#?}", l) + let discriminant_type_metadata = match layout.variants { + layout::Variants::Single { .. } | + layout::Variants::NicheFilling { .. } => None, + layout::Variants::Tagged { ref discr, .. 
} => { + Some(discriminant_type_metadata(discr.value)) + } }; - let enum_llvm_type = type_of::type_of(cx, enum_type); - let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type); + match (&layout.abi, discriminant_type_metadata) { + (&layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr), + _ => {} + } + + let (enum_type_size, enum_type_align) = layout.size_and_align(); let enum_name = CString::new(enum_name).unwrap(); let unique_type_id_str = CString::new( @@ -1629,8 +1449,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - bytes_to_bits(enum_type_size), - bytes_to_bits(enum_type_align), + enum_type_size.bits(), + enum_type_align.abi_bits() as u32, DIFlags::FlagZero, ptr::null_mut(), 0, // RuntimeLang @@ -1642,13 +1462,11 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_type, unique_type_id, enum_metadata, - enum_llvm_type, EnumMDF(EnumMemberDescriptionFactory { enum_type, - type_rep: type_rep.layout, + layout, discriminant_type_metadata, containing_scope, - file_metadata, span, }), ); @@ -1664,28 +1482,27 @@ fn get_enum_discriminant_name(cx: &CrateContext, /// results in a LLVM struct. /// /// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. -fn composite_type_metadata(cx: &CrateContext, - composite_llvm_type: Type, - composite_type_name: &str, - composite_type_unique_id: UniqueTypeId, - member_descriptions: &[MemberDescription], - containing_scope: DIScope, - - // Ignore source location information as long as it - // can't be reconstructed for non-local crates. - _file_metadata: DIFile, - _definition_span: Span) - -> DICompositeType { +fn composite_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + composite_type: Ty<'tcx>, + composite_type_name: &str, + composite_type_unique_id: UniqueTypeId, + member_descriptions: &[MemberDescription], + containing_scope: DIScope, + + // Ignore source location information as long as it + // can't be reconstructed for non-local crates. + _file_metadata: DIFile, + _definition_span: Span) + -> DICompositeType { // Create the (empty) struct metadata node ... let composite_type_metadata = create_struct_stub(cx, - composite_llvm_type, + composite_type, composite_type_name, composite_type_unique_id, containing_scope); // ... and immediately create and add the member descriptions. 
set_members_of_composite_type(cx, composite_type_metadata, - composite_llvm_type, member_descriptions); return composite_type_metadata; @@ -1693,7 +1510,6 @@ fn composite_type_metadata(cx: &CrateContext, fn set_members_of_composite_type(cx: &CrateContext, composite_type_metadata: DICompositeType, - composite_llvm_type: Type, member_descriptions: &[MemberDescription]) { // In some rare cases LLVM metadata uniquing would lead to an existing type // description being used instead of a new one created in @@ -1714,14 +1530,7 @@ fn set_members_of_composite_type(cx: &CrateContext, let member_metadata: Vec = member_descriptions .iter() - .enumerate() - .map(|(i, member_description)| { - let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type); - let member_offset = match member_description.offset { - FixedMemberOffset { bytes } => bytes as u64, - ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i) - }; - + .map(|member_description| { let member_name = member_description.name.as_bytes(); let member_name = CString::new(member_name).unwrap(); unsafe { @@ -1731,9 +1540,9 @@ fn set_members_of_composite_type(cx: &CrateContext, member_name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(member_size), - bytes_to_bits(member_align), - bytes_to_bits(member_offset), + member_description.size.bits(), + member_description.align.abi_bits() as u32, + member_description.offset.bits(), member_description.flags, member_description.type_metadata) } @@ -1750,13 +1559,13 @@ fn set_members_of_composite_type(cx: &CrateContext, // A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do // any caching, does not add any fields to the struct. This can be done later // with set_members_of_composite_type(). 
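`create_struct_stub` plus `set_members_of_composite_type` is a two-phase construction: the empty stub is registered first so that recursive types can reference themselves while their members are still being described, and the members are attached afterwards. A small standalone analogue of that ordering, using a plain HashMap as a stand-in for the type map rather than the debuginfo machinery.

use std::collections::HashMap;

#[derive(Debug, Default)]
struct CompositeNode { members: Vec<String> }

fn main() {
    let mut type_map: HashMap<&str, CompositeNode> = HashMap::new();
    // Phase 1: forward declaration (the "stub"), so "List" is already known.
    type_map.insert("List", CompositeNode::default());
    // Phase 2: members may now refer back to "List" without recursing forever.
    type_map.get_mut("List").unwrap().members =
        vec!["next: Option<Box<List>>".to_string()];
    println!("{:?}", type_map["List"]);
}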
-fn create_struct_stub(cx: &CrateContext, - struct_llvm_type: Type, - struct_type_name: &str, - unique_type_id: UniqueTypeId, - containing_scope: DIScope) - -> DICompositeType { - let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type); +fn create_struct_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + struct_type: Ty<'tcx>, + struct_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (struct_size, struct_align) = cx.size_and_align_of(struct_type); let name = CString::new(struct_type_name).unwrap(); let unique_type_id = CString::new( @@ -1774,8 +1583,8 @@ fn create_struct_stub(cx: &CrateContext, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(struct_size), - bytes_to_bits(struct_align), + struct_size.bits(), + struct_align.abi_bits() as u32, DIFlags::FlagZero, ptr::null_mut(), empty_array, @@ -1787,13 +1596,13 @@ fn create_struct_stub(cx: &CrateContext, return metadata_stub; } -fn create_union_stub(cx: &CrateContext, - union_llvm_type: Type, - union_type_name: &str, - unique_type_id: UniqueTypeId, - containing_scope: DIScope) - -> DICompositeType { - let (union_size, union_align) = size_and_align_of(cx, union_llvm_type); +fn create_union_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + union_type: Ty<'tcx>, + union_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (union_size, union_align) = cx.size_and_align_of(union_type); let name = CString::new(union_type_name).unwrap(); let unique_type_id = CString::new( @@ -1811,8 +1620,8 @@ fn create_union_stub(cx: &CrateContext, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(union_size), - bytes_to_bits(union_align), + union_size.bits(), + union_align.abi_bits() as u32, DIFlags::FlagZero, empty_array, 0, // RuntimeLang @@ -1867,7 +1676,7 @@ pub fn create_global_var_metadata(cx: &CrateContext, is_local_to_unit, global, ptr::null_mut(), - global_align, + global_align.abi() as u32, ); } } @@ -1899,8 +1708,6 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP); - let llvm_vtable_type = Type::vtable_ptr(cx).element_type(); - let (struct_size, struct_align) = size_and_align_of(cx, llvm_vtable_type); unsafe { // LLVMRustDIBuilderCreateStructType() wants an empty array. 
A null @@ -1919,8 +1726,8 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(struct_size), - bytes_to_bits(struct_align), + Size::from_bytes(0).bits(), + cx.tcx().data_layout.pointer_align.abi_bits() as u32, DIFlags::FlagArtificial, ptr::null_mut(), empty_array, diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 15b299674eea36852a3a28117d43d75693617bf6..c0df25202d8a9379f2730cf27ae7813ebdc59233 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -43,7 +43,7 @@ use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::Symbol; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, LayoutOf}; pub mod gdb; mod utils; @@ -71,7 +71,7 @@ pub struct CrateDebugContext<'tcx> { llmod: ModuleRef, builder: DIBuilderRef, created_files: RefCell>, - created_enum_disr_types: RefCell>, + created_enum_disr_types: RefCell>, type_map: RefCell>, namespace_map: RefCell>, @@ -335,8 +335,7 @@ fn get_function_signature<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, signature.extend(inputs.iter().map(|&t| { let t = match t.sty { ty::TyArray(ct, _) - if (ct == cx.tcx().types.u8) || - (cx.layout_of(ct).size(cx).bytes() == 0) => { + if (ct == cx.tcx().types.u8) || cx.layout_of(ct).is_zst() => { cx.tcx().mk_imm_ptr(ct) } _ => t @@ -499,7 +498,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, cx.sess().opts.optimize != config::OptLevel::No, DIFlags::FlagZero, argument_index, - align, + align.abi() as u32, ) }; source_loc::set_debug_location(bcx, diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs index ad4fdfca7261ff294fa46724c1fe6fb7ef8aa57e..95427d9b3cd4e76a8f1606b75e98be0092b50858 100644 --- a/src/librustc_trans/debuginfo/utils.rs +++ b/src/librustc_trans/debuginfo/utils.rs @@ -18,15 +18,11 @@ use llvm; use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray}; -use machine; use common::{CrateContext}; -use type_::Type; use syntax_pos::{self, Span}; use syntax::ast; -use std::ops; - pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool { // The is_local_to_unit flag indicates whether a function is local to the @@ -53,15 +49,6 @@ pub fn span_start(cx: &CrateContext, span: Span) -> syntax_pos::Loc { cx.sess().codemap().lookup_char_pos(span.lo()) } -pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u32) { - (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type)) -} - -pub fn bytes_to_bits(bytes: T) -> T - where T: ops::Mul + From { - bytes * 8u8.into() -} - #[inline] pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>) -> &'a CrateDebugContext<'tcx> { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 453b98a1d74f796f7931862fbcfd7d3e55c12abc..6c7d7700adeb28b9b043e02c7c9a18a5ecaa4f44 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -19,8 +19,7 @@ use llvm::{ValueRef}; use llvm; use meth; -use monomorphize; -use rustc::ty::layout::LayoutTyper; +use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; use value::Value; @@ -29,17 +28,28 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); if bcx.ccx.shared().type_is_sized(t) { - let size = bcx.ccx.size_of(t); - let align = bcx.ccx.align_of(t); - debug!("size_and_align_of_dst 
t={} info={:?} size: {} align: {}", + let (size, align) = bcx.ccx.size_and_align_of(t); + debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, Value(info), size, align); - let size = C_usize(bcx.ccx, size); - let align = C_usize(bcx.ccx, align as u64); + let size = C_usize(bcx.ccx, size.bytes()); + let align = C_usize(bcx.ccx, align.abi()); return (size, align); } assert!(!info.is_null()); match t.sty { - ty::TyAdt(..) | ty::TyTuple(..) => { + ty::TyDynamic(..) => { + // load size/align from vtable + (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info)) + } + ty::TySlice(_) | ty::TyStr => { + let unit = t.sequence_element_type(bcx.tcx()); + // The info in this case is the length of the str, so the size is that + // times the unit size. + let (size, align) = bcx.ccx.size_and_align_of(unit); + (bcx.mul(info, C_usize(bcx.ccx, size.bytes())), + C_usize(bcx.ccx, align.abi())) + } + _ => { let ccx = bcx.ccx; // First get the size of all statically known fields. // Don't use size_of because it also rounds up to alignment, which we @@ -48,15 +58,9 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let layout = ccx.layout_of(t); debug!("DST {} layout: {:?}", t, layout); - let (sized_size, sized_align) = match *layout { - ty::layout::Layout::Univariant { ref variant, .. } => { - (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi()) - } - _ => { - bug!("size_and_align_of_dst: expcted Univariant for `{}`, found {:#?}", - t, layout); - } - }; + let i = layout.fields.count() - 1; + let sized_size = layout.fields.offset(i).bytes(); + let sized_align = layout.align.abi(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); let sized_size = C_usize(ccx, sized_size); @@ -64,14 +68,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = match t.sty { - ty::TyAdt(def, substs) => { - let last_field = def.struct_variant().fields.last().unwrap(); - monomorphize::field_ty(bcx.tcx(), substs, last_field) - }, - ty::TyTuple(tys, _) => tys.last().unwrap(), - _ => unreachable!(), - }; + let field_ty = layout.field(ccx, i).ty; let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding @@ -114,17 +111,5 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf (size, align) } - ty::TyDynamic(..) => { - // load size/align from vtable - (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info)) - } - ty::TySlice(_) | ty::TyStr => { - let unit = t.sequence_element_type(bcx.tcx()); - // The info in this case is the length of the str, so the size is that - // times the unit size. 
- (bcx.mul(info, C_usize(bcx.ccx, bcx.ccx.size_of(unit))), - C_usize(bcx.ccx, bcx.ccx.align_of(unit) as u64)) - } - _ => bug!("Unexpected unsized type, found {}", t) } } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index 2f1a95038eae59a543a580eb16198bc26f5e43d5..adbb45f893b08778d763e5a6e042d01c04437a97 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -11,20 +11,19 @@ #![allow(non_upper_case_globals)] use intrinsics::{self, Intrinsic}; -use libc; use llvm; use llvm::{ValueRef}; -use abi::{Abi, FnType}; -use adt; +use abi::{Abi, FnType, PassMode}; use mir::lvalue::{LvalueRef, Alignment}; +use mir::operand::{OperandRef, OperandValue}; use base::*; use common::*; use declare; use glue; -use type_of; -use machine; use type_::Type; +use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; +use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; @@ -88,8 +87,8 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option { /// add them to librustc_trans/trans/context.rs pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, callee_ty: Ty<'tcx>, - fn_ty: &FnType, - llargs: &[ValueRef], + fn_ty: &FnType<'tcx>, + args: &[OperandRef<'tcx>], llresult: ValueRef, span: Span) { let ccx = bcx.ccx; @@ -106,27 +105,34 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let ret_ty = sig.output(); let name = &*tcx.item_name(def_id); - let llret_ty = type_of::type_of(ccx, ret_ty); + let llret_ty = ccx.layout_of(ret_ty).llvm_type(ccx); + let result = LvalueRef::new_sized(llresult, fn_ty.ret.layout, Alignment::AbiAligned); let simple = get_simple_intrinsic(ccx, name); let llval = match name { _ if simple.is_some() => { - bcx.call(simple.unwrap(), &llargs, None) + bcx.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) } "unreachable" => { return; }, "likely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None) + bcx.call(expect, &[args[0].immediate(), C_bool(ccx, true)], None) } "unlikely" => { let expect = ccx.get_intrinsic(&("llvm.expect.i1")); - bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None) + bcx.call(expect, &[args[0].immediate(), C_bool(ccx, false)], None) } "try" => { - try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult); - C_nil(ccx) + try_intrinsic(bcx, ccx, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult); + return; } "breakpoint" => { let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); @@ -134,42 +140,35 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "size_of" => { let tp_ty = substs.type_at(0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + C_usize(ccx, ccx.size_of(tp_ty).bytes()) } "size_of_val" => { let tp_ty = substs.type_at(0); - if bcx.ccx.shared().type_is_sized(tp_ty) { - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) - } else if bcx.ccx.shared().type_has_metadata(tp_ty) { + if let OperandValue::Pair(_, meta) = args[0].val { let (llsize, _) = - glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, meta); llsize } else { - C_usize(ccx, 0u64) + C_usize(ccx, ccx.size_of(tp_ty).bytes()) } } "min_align_of" => { let tp_ty = substs.type_at(0); - C_usize(ccx, ccx.align_of(tp_ty) as u64) + C_usize(ccx, ccx.align_of(tp_ty).abi()) } 
"min_align_of_val" => { let tp_ty = substs.type_at(0); - if bcx.ccx.shared().type_is_sized(tp_ty) { - C_usize(ccx, ccx.align_of(tp_ty) as u64) - } else if bcx.ccx.shared().type_has_metadata(tp_ty) { + if let OperandValue::Pair(_, meta) = args[0].val { let (_, llalign) = - glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); + glue::size_and_align_of_dst(bcx, tp_ty, meta); llalign } else { - C_usize(ccx, 1u64) + C_usize(ccx, ccx.align_of(tp_ty).abi()) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llalign_of_pref(ccx, lltp_ty) as u64) + C_usize(ccx, ccx.align_of(tp_ty).pref()) } "type_name" => { let tp_ty = substs.type_at(0); @@ -181,18 +180,18 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "init" => { let ty = substs.type_at(0); - if !type_is_zero_size(ccx, ty) { + if !ccx.layout_of(ty).is_zst() { // Just zero out the stack slot. // If we store a zero constant, LLVM will drown in vreg allocation for large data // structures, and the generated code will be awful. (A telltale sign of this is // large quantities of `mov [byte ptr foo],0` in the generated code.) memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_usize(ccx, 1)); } - C_nil(ccx) + return; } // Effectively no-ops "uninit" => { - C_nil(ccx) + return; } "needs_drop" => { let tp_ty = substs.type_at(0); @@ -200,69 +199,75 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty)) } "offset" => { - let ptr = llargs[0]; - let offset = llargs[1]; + let ptr = args[0].immediate(); + let offset = args[1].immediate(); bcx.inbounds_gep(ptr, &[offset]) } "arith_offset" => { - let ptr = llargs[0]; - let offset = llargs[1]; + let ptr = args[0].immediate(); + let offset = args[1].immediate(); bcx.gep(ptr, &[offset]) } "copy_nonoverlapping" => { - copy_intrinsic(bcx, false, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) + copy_intrinsic(bcx, false, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) } "copy" => { - copy_intrinsic(bcx, true, false, substs.type_at(0), llargs[1], llargs[0], llargs[2]) + copy_intrinsic(bcx, true, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) } "write_bytes" => { - memset_intrinsic(bcx, false, substs.type_at(0), llargs[0], llargs[1], llargs[2]) + memset_intrinsic(bcx, false, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(bcx, false, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) + copy_intrinsic(bcx, false, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_copy_memory" => { - copy_intrinsic(bcx, true, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) + copy_intrinsic(bcx, true, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_set_memory" => { - memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2]) + memset_intrinsic(bcx, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) } "volatile_load" => { let tp_ty = substs.type_at(0); - let mut ptr = llargs[0]; - if let Some(ty) = fn_ty.ret.cast { - ptr = bcx.pointercast(ptr, ty.ptr_to()); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty) = fn_ty.ret.mode { + ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to()); } let load 
= bcx.volatile_load(ptr); unsafe { - llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty)); + llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty).abi() as u32); } - to_immediate(bcx, load, tp_ty) + to_immediate(bcx, load, ccx.layout_of(tp_ty)) }, "volatile_store" => { let tp_ty = substs.type_at(0); - if type_is_fat_ptr(bcx.ccx, tp_ty) { - bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0])); - bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0])); + let dst = args[0].deref(bcx.ccx); + if let OperandValue::Pair(a, b) = args[1].val { + bcx.volatile_store(a, dst.project_field(bcx, 0).llval); + bcx.volatile_store(b, dst.project_field(bcx, 1).llval); } else { - let val = if fn_ty.args[1].is_indirect() { - bcx.load(llargs[1], None) + let val = if let OperandValue::Ref(ptr, align) = args[1].val { + bcx.load(ptr, align.non_abi()) } else { - if !type_is_zero_size(ccx, tp_ty) { - from_immediate(bcx, llargs[1]) - } else { - C_nil(ccx) + if dst.layout.is_zst() { + return; } + from_immediate(bcx, args[1].immediate()) }; - let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to()); + let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to()); let store = bcx.volatile_store(val, ptr); unsafe { - llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty)); + llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32); } } - C_nil(ccx) + return; }, "prefetch_read_data" | "prefetch_write_data" | "prefetch_read_instruction" | "prefetch_write_instruction" => { @@ -274,35 +279,40 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "prefetch_write_instruction" => (1, 0), _ => bug!() }; - bcx.call(expect, &[llargs[0], C_i32(ccx, rw), llargs[1], C_i32(ccx, cache_type)], None) + bcx.call(expect, &[ + args[0].immediate(), + C_i32(ccx, rw), + args[1].immediate(), + C_i32(ccx, cache_type) + ], None) }, "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => { - let sty = &arg_tys[0].sty; - match int_type_width_signed(sty, ccx) { + let ty = arg_tys[0]; + match int_type_width_signed(ty, ccx) { Some((width, signed)) => match name { "ctlz" | "cttz" => { let y = C_bool(bcx.ccx, false); let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - bcx.call(llfn, &[llargs[0], y], None) + bcx.call(llfn, &[args[0].immediate(), y], None) } "ctlz_nonzero" | "cttz_nonzero" => { let y = C_bool(bcx.ccx, true); let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); let llfn = ccx.get_intrinsic(llvm_name); - bcx.call(llfn, &[llargs[0], y], None) + bcx.call(llfn, &[args[0].immediate(), y], None) } "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &llargs, None), + &[args[0].immediate()], None), "bswap" => { if width == 8 { - llargs[0] // byte swap a u8/i8 is just a no-op + args[0].immediate() // byte swap a u8/i8 is just a no-op } else { bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &llargs, None) + &[args[0].immediate()], None) } } "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { @@ -312,35 +322,41 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let llfn = bcx.ccx.get_intrinsic(&intrinsic); // Convert `i1` to a `bool`, and write it to the out parameter - let val = bcx.call(llfn, &[llargs[0], llargs[1]], None); - let result = bcx.extract_value(val, 0); - let overflow = bcx.zext(bcx.extract_value(val, 1), 
Type::bool(ccx)); - bcx.store(result, bcx.struct_gep(llresult, 0), None); - bcx.store(overflow, bcx.struct_gep(llresult, 1), None); - - C_nil(bcx.ccx) + let pair = bcx.call(llfn, &[ + args[0].immediate(), + args[1].immediate() + ], None); + let val = bcx.extract_value(pair, 0); + let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx)); + + let dest = result.project_field(bcx, 0); + bcx.store(val, dest.llval, dest.alignment.non_abi()); + let dest = result.project_field(bcx, 1); + bcx.store(overflow, dest.llval, dest.alignment.non_abi()); + + return; }, - "overflowing_add" => bcx.add(llargs[0], llargs[1]), - "overflowing_sub" => bcx.sub(llargs[0], llargs[1]), - "overflowing_mul" => bcx.mul(llargs[0], llargs[1]), + "overflowing_add" => bcx.add(args[0].immediate(), args[1].immediate()), + "overflowing_sub" => bcx.sub(args[0].immediate(), args[1].immediate()), + "overflowing_mul" => bcx.mul(args[0].immediate(), args[1].immediate()), "unchecked_div" => if signed { - bcx.sdiv(llargs[0], llargs[1]) + bcx.sdiv(args[0].immediate(), args[1].immediate()) } else { - bcx.udiv(llargs[0], llargs[1]) + bcx.udiv(args[0].immediate(), args[1].immediate()) }, "unchecked_rem" => if signed { - bcx.srem(llargs[0], llargs[1]) + bcx.srem(args[0].immediate(), args[1].immediate()) } else { - bcx.urem(llargs[0], llargs[1]) + bcx.urem(args[0].immediate(), args[1].immediate()) }, - "unchecked_shl" => bcx.shl(llargs[0], llargs[1]), + "unchecked_shl" => bcx.shl(args[0].immediate(), args[1].immediate()), "unchecked_shr" => if signed { - bcx.ashr(llargs[0], llargs[1]) + bcx.ashr(args[0].immediate(), args[1].immediate()) } else { - bcx.lshr(llargs[0], llargs[1]) + bcx.lshr(args[0].immediate(), args[1].immediate()) }, _ => bug!(), }, @@ -348,8 +364,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, span_invalid_monomorphization_error( tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); - C_nil(ccx) + expected basic integer type, found `{}`", name, ty)); + return; } } @@ -359,11 +375,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, match float_type_width(sty) { Some(_width) => match name { - "fadd_fast" => bcx.fadd_fast(llargs[0], llargs[1]), - "fsub_fast" => bcx.fsub_fast(llargs[0], llargs[1]), - "fmul_fast" => bcx.fmul_fast(llargs[0], llargs[1]), - "fdiv_fast" => bcx.fdiv_fast(llargs[0], llargs[1]), - "frem_fast" => bcx.frem_fast(llargs[0], llargs[1]), + "fadd_fast" => bcx.fadd_fast(args[0].immediate(), args[1].immediate()), + "fsub_fast" => bcx.fsub_fast(args[0].immediate(), args[1].immediate()), + "fmul_fast" => bcx.fmul_fast(args[0].immediate(), args[1].immediate()), + "fdiv_fast" => bcx.fdiv_fast(args[0].immediate(), args[1].immediate()), + "frem_fast" => bcx.frem_fast(args[0].immediate(), args[1].immediate()), _ => bug!(), }, None => { @@ -371,40 +387,37 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ expected basic float type, found `{}`", name, sty)); - C_nil(ccx) + return; } } }, "discriminant_value" => { - let val_ty = substs.type_at(0); - match val_ty.sty { - ty::TyAdt(adt, ..) 
if adt.is_enum() => { - adt::trans_get_discr(bcx, val_ty, llargs[0], Alignment::AbiAligned, - Some(llret_ty), true) - } - _ => C_null(llret_ty) - } + args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty) } "align_offset" => { // `ptr as usize` - let ptr_val = bcx.ptrtoint(llargs[0], bcx.ccx.isize_ty()); + let ptr_val = bcx.ptrtoint(args[0].immediate(), bcx.ccx.isize_ty()); // `ptr_val % align` - let offset = bcx.urem(ptr_val, llargs[1]); + let align = args[1].immediate(); + let offset = bcx.urem(ptr_val, align); let zero = C_null(bcx.ccx.isize_ty()); // `offset == 0` let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero); // `if offset == 0 { 0 } else { offset - align }` - bcx.select(is_zero, zero, bcx.sub(offset, llargs[1])) + bcx.select(is_zero, zero, bcx.sub(offset, align)) } name if name.starts_with("simd_") => { - generic_simd_intrinsic(bcx, name, - callee_ty, - &llargs, - ret_ty, llret_ty, - span) + match generic_simd_intrinsic(bcx, name, + callee_ty, + args, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return + } } // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_[_]", and no ordering means SeqCst @@ -438,57 +451,66 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, _ => ccx.sess().fatal("Atomic intrinsic not in correct format"), }; - let invalid_monomorphization = |sty| { + let invalid_monomorphization = |ty| { span_invalid_monomorphization_error(tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + expected basic integer type, found `{}`", name, ty)); }; match split[1] { "cxchg" | "cxchgweak" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; - let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, - failorder, weak); - let result = bcx.extract_value(val, 0); - let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx)); - bcx.store(result, bcx.struct_gep(llresult, 0), None); - bcx.store(success, bcx.struct_gep(llresult, 1), None); + let pair = bcx.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak); + let val = bcx.extract_value(pair, 0); + let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx)); + + let dest = result.project_field(bcx, 0); + bcx.store(val, dest.llval, dest.alignment.non_abi()); + let dest = result.project_field(bcx, 1); + bcx.store(success, dest.llval, dest.alignment.non_abi()); + return; } else { - invalid_monomorphization(sty); + return invalid_monomorphization(ty); } - C_nil(ccx) } "load" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { - bcx.atomic_load(llargs[0], order) + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { + let align = ccx.align_of(ty); + bcx.atomic_load(args[0].immediate(), order, align) } else { - invalid_monomorphization(sty); - C_nil(ccx) + return invalid_monomorphization(ty); } } "store" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { - bcx.atomic_store(llargs[1], llargs[0], order); + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { + let align = ccx.align_of(ty); + bcx.atomic_store(args[1].immediate(), args[0].immediate(), order, align); + return; } else { - 
invalid_monomorphization(sty); + return invalid_monomorphization(ty); } - C_nil(ccx) } "fence" => { bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); - C_nil(ccx) + return; } "singlethreadfence" => { bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); - C_nil(ccx) + return; } // These are all AtomicRMW ops @@ -508,12 +530,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, _ => ccx.sess().fatal("unknown atomic operation") }; - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { - bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order) + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { + bcx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) } else { - invalid_monomorphization(sty); - C_nil(ccx) + return invalid_monomorphization(ty); } } } @@ -528,13 +549,11 @@ fn one(x: Vec) -> T { assert_eq!(x.len(), 1); x.into_iter().next().unwrap() } - fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, - any_changes_needed: &mut bool) -> Vec { + fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type) -> Vec { use intrinsics::Type::*; match *t { Void => vec![Type::void(ccx)], - Integer(_signed, width, llvm_width) => { - *any_changes_needed |= width != llvm_width; + Integer(_signed, _width, llvm_width) => { vec![Type::ix(ccx, llvm_width as u64)] } Float(x) => { @@ -545,29 +564,24 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, } } Pointer(ref t, ref llvm_elem, _const) => { - *any_changes_needed |= llvm_elem.is_some(); - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, any_changes_needed)); + let elem = one(ty_to_type(ccx, t)); vec![elem.ptr_to()] } Vector(ref t, ref llvm_elem, length) => { - *any_changes_needed |= llvm_elem.is_some(); - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(ccx, t, any_changes_needed)); + let elem = one(ty_to_type(ccx, t)); vec![Type::vector(&elem, length as u64)] } Aggregate(false, ref contents) => { let elems = contents.iter() - .map(|t| one(ty_to_type(ccx, t, any_changes_needed))) + .map(|t| one(ty_to_type(ccx, t))) .collect::>(); vec![Type::struct_(ccx, &elems, false)] } Aggregate(true, ref contents) => { - *any_changes_needed = true; contents.iter() - .flat_map(|t| ty_to_type(ccx, t, any_changes_needed)) + .flat_map(|t| ty_to_type(ccx, t)) .collect() } } @@ -579,8 +593,7 @@ fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type, // cast. fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: &intrinsics::Type, - arg_type: Ty<'tcx>, - llarg: ValueRef) + arg: &OperandRef<'tcx>) -> Vec { match *t { @@ -591,55 +604,44 @@ fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. 
- assert!(!bcx.ccx.shared().type_needs_drop(arg_type)); - let arg = LvalueRef::new_sized_ty(llarg, arg_type, Alignment::AbiAligned); + assert!(!bcx.ccx.shared().type_needs_drop(arg.layout.ty)); + let (ptr, align) = match arg.val { + OperandValue::Ref(ptr, align) => (ptr, align), + _ => bug!() + }; + let arg = LvalueRef::new_sized(ptr, arg.layout, align); (0..contents.len()).map(|i| { - let (ptr, align) = arg.trans_field_ptr(bcx, i); - bcx.load(ptr, align.to_align()) + arg.project_field(bcx, i).load(bcx).immediate() }).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); - vec![bcx.pointercast(llarg, llvm_elem.ptr_to())] + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem)); + vec![bcx.pointercast(arg.immediate(), llvm_elem.ptr_to())] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false)); - vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))] + let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem)); + vec![bcx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. - vec![bcx.trunc(llarg, Type::ix(bcx.ccx, llvm_width as u64))] + vec![bcx.trunc(arg.immediate(), Type::ix(bcx.ccx, llvm_width as u64))] } - _ => vec![llarg], + _ => vec![arg.immediate()], } } - let mut any_changes_needed = false; let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed)) + .flat_map(|t| ty_to_type(ccx, t)) .collect::>(); - let mut out_changes = false; - let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes)); - // outputting a flattened aggregate is nonsense - assert!(!out_changes); + let outputs = one(ty_to_type(ccx, &intr.output)); - let llargs = if !any_changes_needed { - // no aggregates to flatten, so no change needed - llargs.to_vec() - } else { - // there are some aggregates that need to be flattened - // in the LLVM call, so we need to run over the types - // again to find them and extract the arguments - intr.inputs.iter() - .zip(llargs) - .zip(arg_tys) - .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg)) - .collect() - }; + let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { + modify_as_needed(bcx, t, arg) + }).collect(); assert_eq!(inputs.len(), llargs.len()); let val = match intr.definition { @@ -657,25 +659,24 @@ fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, assert!(!flatten); for i in 0..elems.len() { - let val = bcx.extract_value(val, i); - let lval = LvalueRef::new_sized_ty(llresult, ret_ty, - Alignment::AbiAligned); - let (dest, align) = lval.trans_field_ptr(bcx, i); - bcx.store(val, dest, align.to_align()); + let dest = result.project_field(bcx, i); + let val = bcx.extract_value(val, i as u64); + bcx.store(val, dest.llval, dest.alignment.non_abi()); } - C_nil(ccx) + return; } _ => val, } } }; - if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { - if let Some(ty) = fn_ty.ret.cast { - let ptr = bcx.pointercast(llresult, ty.ptr_to()); + if !fn_ty.ret.is_ignore() { + if let PassMode::Cast(ty) = fn_ty.ret.mode { + let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to()); bcx.store(llval, ptr, Some(ccx.align_of(ret_ty))); } else { - store_ty(bcx, llval, llresult, Alignment::AbiAligned, 
ret_ty); + OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout) + .val.store(bcx, result); } } } @@ -683,16 +684,15 @@ fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, allow_overlap: bool, volatile: bool, - tp_ty: Ty<'tcx>, + ty: Ty<'tcx>, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef { let ccx = bcx.ccx; - let lltp_ty = type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, ccx.align_of(tp_ty) as i32); - let size = machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); + let (size, align) = ccx.size_and_align_of(ty); + let size = C_usize(ccx, size.bytes()); + let align = C_i32(ccx, align.abi() as i32); let operation = if allow_overlap { "memmove" @@ -700,7 +700,8 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "memcpy" }; - let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size); + let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, + ccx.data_layout().pointer_size.bits()); let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx)); let src_ptr = bcx.pointercast(src, Type::i8p(ccx)); @@ -724,9 +725,9 @@ fn memset_intrinsic<'a, 'tcx>( count: ValueRef ) -> ValueRef { let ccx = bcx.ccx; - let align = C_i32(ccx, ccx.align_of(ty) as i32); - let lltp_ty = type_of::type_of(ccx, ty); - let size = machine::llsize_of(ccx, lltp_ty); + let (size, align) = ccx.size_and_align_of(ty); + let size = C_usize(ccx, size.bytes()); + let align = C_i32(ccx, align.abi() as i32); let dst = bcx.pointercast(dst, Type::i8p(ccx)); call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile) } @@ -816,7 +817,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // // More information can be found in libstd's seh.rs implementation. let i64p = Type::i64(ccx).ptr_to(); - let slot = bcx.alloca(i64p, "slot", None); + let slot = bcx.alloca(i64p, "slot", ccx.data_layout().pointer_align); bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); @@ -972,11 +973,11 @@ fn generic_simd_intrinsic<'a, 'tcx>( bcx: &Builder<'a, 'tcx>, name: &str, callee_ty: Ty<'tcx>, - llargs: &[ValueRef], + args: &[OperandRef<'tcx>], ret_ty: Ty<'tcx>, llret_ty: Type, span: Span -) -> ValueRef { +) -> Result { // macros for error handling: macro_rules! 
emit_error { ($msg: tt) => { @@ -994,7 +995,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( ($cond: expr, $($fmt: tt)*) => { if !$cond { emit_error!($($fmt)*); - return C_nil(bcx.ccx) + return Err(()); } } } @@ -1040,12 +1041,12 @@ fn generic_simd_intrinsic<'a, 'tcx>( ret_ty, ret_ty.simd_type(tcx)); - return compare_simd_types(bcx, - llargs[0], - llargs[1], - in_elem, - llret_ty, - cmp_op) + return Ok(compare_simd_types(bcx, + args[0].immediate(), + args[1].immediate(), + in_elem, + llret_ty, + cmp_op)) } if name.starts_with("simd_shuffle") { @@ -1069,12 +1070,12 @@ fn generic_simd_intrinsic<'a, 'tcx>( let total_len = in_len as u128 * 2; - let vector = llargs[2]; + let vector = args[2].immediate(); let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = const_get_elt(vector, &[i as libc::c_uint]); + let val = const_get_elt(vector, i as u64); match const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is not a constant", arg_idx); @@ -1091,23 +1092,27 @@ fn generic_simd_intrinsic<'a, 'tcx>( .collect(); let indices = match indices { Some(i) => i, - None => return C_null(llret_ty) + None => return Ok(C_null(llret_ty)) }; - return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices)) + return Ok(bcx.shuffle_vector(args[0].immediate(), + args[1].immediate(), + C_vector(&indices))) } if name == "simd_insert" { require!(in_elem == arg_tys[2], "expected inserted type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, arg_tys[2]); - return bcx.insert_element(llargs[0], llargs[2], llargs[1]) + return Ok(bcx.insert_element(args[0].immediate(), + args[2].immediate(), + args[1].immediate())) } if name == "simd_extract" { require!(ret_ty == in_elem, "expected return type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, ret_ty); - return bcx.extract_element(llargs[0], llargs[1]) + return Ok(bcx.extract_element(args[0].immediate(), args[1].immediate())) } if name == "simd_cast" { @@ -1121,7 +1126,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( // casting cares about nominal type, not just structural type let out_elem = ret_ty.simd_type(tcx); - if in_elem == out_elem { return llargs[0]; } + if in_elem == out_elem { return Ok(args[0].immediate()); } enum Style { Float, Int(/* is signed? */ bool), Unsupported } @@ -1142,36 +1147,36 @@ enum Style { Float, Int(/* is signed? 
*/ bool), Unsupported } match (in_style, out_style) { (Style::Int(in_is_signed), Style::Int(_)) => { - return match in_width.cmp(&out_width) { - Ordering::Greater => bcx.trunc(llargs[0], llret_ty), - Ordering::Equal => llargs[0], + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => bcx.trunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), Ordering::Less => if in_is_signed { - bcx.sext(llargs[0], llret_ty) + bcx.sext(args[0].immediate(), llret_ty) } else { - bcx.zext(llargs[0], llret_ty) + bcx.zext(args[0].immediate(), llret_ty) } - } + }) } (Style::Int(in_is_signed), Style::Float) => { - return if in_is_signed { - bcx.sitofp(llargs[0], llret_ty) + return Ok(if in_is_signed { + bcx.sitofp(args[0].immediate(), llret_ty) } else { - bcx.uitofp(llargs[0], llret_ty) - } + bcx.uitofp(args[0].immediate(), llret_ty) + }) } (Style::Float, Style::Int(out_is_signed)) => { - return if out_is_signed { - bcx.fptosi(llargs[0], llret_ty) + return Ok(if out_is_signed { + bcx.fptosi(args[0].immediate(), llret_ty) } else { - bcx.fptoui(llargs[0], llret_ty) - } + bcx.fptoui(args[0].immediate(), llret_ty) + }) } (Style::Float, Style::Float) => { - return match in_width.cmp(&out_width) { - Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty), - Ordering::Equal => llargs[0], - Ordering::Less => bcx.fpext(llargs[0], llret_ty) - } + return Ok(match in_width.cmp(&out_width) { + Ordering::Greater => bcx.fptrunc(args[0].immediate(), llret_ty), + Ordering::Equal => args[0].immediate(), + Ordering::Less => bcx.fpext(args[0].immediate(), llret_ty) + }) } _ => {/* Unsupported. Fallthrough. */} } @@ -1182,21 +1187,18 @@ enum Style { Float, Int(/* is signed? */ bool), Unsupported } } macro_rules! arith { ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { - $( - if name == stringify!($name) { - match in_elem.sty { - $( - $(ty::$p(_))|* => { - return bcx.$call(llargs[0], llargs[1]) - } - )* - _ => {}, - } - require!(false, - "unsupported operation on `{}` with element `{}`", - in_ty, - in_elem) - })* + $(if name == stringify!($name) { + match in_elem.sty { + $($(ty::$p(_))|* => { + return Ok(bcx.$call(args[0].immediate(), args[1].immediate())) + })* + _ => {}, + } + require!(false, + "unsupported operation on `{}` with element `{}`", + in_ty, + in_elem) + })* } } arith! { @@ -1214,15 +1216,13 @@ enum Style { Float, Int(/* is signed? */ bool), Unsupported } span_bug!(span, "unknown SIMD intrinsic"); } -// Returns the width of an int TypeVariant, and if it's signed or not +// Returns the width of an int Ty, and if it's signed or not // Returns None if the type is not an integer // FIXME: there’s multiple of this functions, investigate using some of the already existing // stuffs. -fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) - -> Option<(u64, bool)> { - use rustc::ty::{TyInt, TyUint}; - match *sty { - TyInt(t) => Some((match t { +fn int_type_width_signed(ty: Ty, ccx: &CrateContext) -> Option<(u64, bool)> { + match ty.sty { + ty::TyInt(t) => Some((match t { ast::IntTy::Is => { match &ccx.tcx().sess.target.target.target_pointer_width[..] { "16" => 16, @@ -1237,7 +1237,7 @@ fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) ast::IntTy::I64 => 64, ast::IntTy::I128 => 128, }, true)), - TyUint(t) => Some((match t { + ty::TyUint(t) => Some((match t { ast::UintTy::Us => { match &ccx.tcx().sess.target.target.target_pointer_width[..] 
{ "16" => 16, diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 96e11d366423aea46d6160dfd953050f22f99a77..f6c4153c183de7203ac492ba5c41c8c3acf2e763 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -25,6 +25,8 @@ #![allow(unused_attributes)] #![feature(i128_type)] #![feature(i128)] +#![feature(inclusive_range)] +#![feature(inclusive_range_syntax)] #![feature(libc)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] @@ -103,7 +105,6 @@ pub mod back { } mod abi; -mod adt; mod allocator; mod asm; mod assert_module_sources; @@ -136,7 +137,6 @@ pub mod back { mod glue; mod intrinsic; mod llvm_util; -mod machine; mod metadata; mod meth; mod mir; @@ -144,7 +144,6 @@ pub mod back { mod symbol_names_test; mod time_graph; mod trans_item; -mod tvec; mod type_; mod type_of; mod value; diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs deleted file mode 100644 index bc383abc7e0ecc96aeb18ead69c13991aaa3d55f..0000000000000000000000000000000000000000 --- a/src/librustc_trans/machine.rs +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Information concerning the machine representation of various types. - -#![allow(non_camel_case_types)] - -use llvm::{self, ValueRef}; -use common::*; - -use type_::Type; - -pub type llbits = u64; -pub type llsize = u64; -pub type llalign = u32; - -// ______________________________________________________________________ -// compute sizeof / alignof - -// Returns the number of bytes between successive elements of type T in an -// array of T. This is the "ABI" size. It includes any ABI-mandated padding. -pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref()); - } -} - -/// Returns the "real" size of the type in bits. -pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits { - unsafe { - llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()) - } -} - -/// Returns the size of the type as an LLVM constant integer value. -pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef { - // Once upon a time, this called LLVMSizeOf, which does a - // getelementptr(1) on a null pointer and casts to an int, in - // order to obtain the type size as a value without requiring the - // target data layout. But we have the target data layout, so - // there's no need for that contrivance. The instruction - // selection DAG generator would flatten that GEP(1) node into a - // constant of the type's alloc size, so let's save it some work. - return C_usize(cx, llsize_of_alloc(cx, ty)); -} - -// Returns the preferred alignment of the given type for the current target. -// The preferred alignment may be larger than the alignment used when -// packing the type into structs. This will be used for things like -// allocations inside a stack frame, which LLVM has a free hand in. -pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -// Returns the minimum alignment of a type required by the platform. -// This is the alignment that will be used for struct fields, arrays, -// and similar ABI-mandated things. 
-pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 { - unsafe { - return llvm::LLVMOffsetOfElement(cx.td(), - struct_ty.to_ref(), - element as u32); - } -} diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index e7c5a36838c2f22db03e35f45bbf26cec30b2c92..a7d467f1cc5f3f572cb4d2dc0fb2f29b3952bba5 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -9,19 +9,20 @@ // except according to those terms. use llvm::ValueRef; +use abi::FnType; use callee; use common::*; use builder::Builder; use consts; -use machine; use monomorphize; use type_::Type; use value::Value; use rustc::ty::{self, Ty}; +use rustc::ty::layout::HasDataLayout; use debuginfo; #[derive(Copy, Clone, Debug)] -pub struct VirtualIndex(usize); +pub struct VirtualIndex(u64); pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0); pub const SIZE: VirtualIndex = VirtualIndex(1); @@ -29,14 +30,18 @@ impl<'a, 'tcx> VirtualIndex { pub fn from_index(index: usize) -> Self { - VirtualIndex(index + 3) + VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef { + pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, + llvtable: ValueRef, + fn_ty: &FnType<'tcx>) -> ValueRef { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", Value(llvtable), self); - let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[self.0]), None); + let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to()); + let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); + bcx.nonnull_metadata(ptr); // Vtable loads are invariant bcx.set_invariant_load(ptr); ptr @@ -47,7 +52,7 @@ pub fn get_usize(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef debug!("get_int({:?}, {:?})", Value(llvtable), self); let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to()); - let ptr = bcx.load(bcx.gepi(llvtable, &[self.0]), None); + let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); // Vtable loads are invariant bcx.set_invariant_load(ptr); ptr @@ -77,12 +82,13 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } // Not in the cache. Build it. 
- let nullptr = C_null(Type::nil(ccx).ptr_to()); + let nullptr = C_null(Type::i8p(ccx)); + let (size, align) = ccx.size_and_align_of(ty); let mut components: Vec<_> = [ callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.tcx(), ty)), - C_usize(ccx, ccx.size_of(ty)), - C_usize(ccx, ccx.align_of(ty) as u64) + C_usize(ccx, size.bytes()), + C_usize(ccx, align.abi()) ].iter().cloned().collect(); if let Some(trait_ref) = trait_ref { @@ -97,7 +103,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } let vtable_const = C_struct(ccx, &components, false); - let align = machine::llalign_of_pref(ccx, val_ty(vtable_const)); + let align = ccx.data_layout().pointer_align; let vtable = consts::addr_of(ccx, vtable_const, align, "vtable"); debuginfo::create_vtable_metadata(ccx, ty, vtable); diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index 73f60ff29a85141c1987dcea6725c6c3c75cb3e2..223379527c989aee07d5a0409d7a6ea034201228 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -18,7 +18,8 @@ use rustc::mir::visit::{Visitor, LvalueContext}; use rustc::mir::traversal; use rustc::ty; -use common; +use rustc::ty::layout::LayoutOf; +use type_of::LayoutLlvmExt; use super::MirContext; pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { @@ -30,21 +31,15 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { let ty = mircx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); - if ty.is_scalar() || - ty.is_box() || - ty.is_region_ptr() || - ty.is_simd() || - common::type_is_zero_size(mircx.ccx, ty) - { + let layout = mircx.ccx.layout_of(ty); + if layout.is_llvm_immediate() { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. - assert!(common::type_is_immediate(mircx.ccx, ty) || - common::type_is_fat_ptr(mircx.ccx, ty)); - } else if common::type_is_imm_pair(mircx.ccx, ty) { + } else if layout.is_llvm_scalar_pair() { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that - // type_is_immediate() may *still* be true, particularly + // is_llvm_immediate() may *still* be true, particularly // for newtypes, but we currently force some types // (e.g. structs) into an alloca unconditionally, just so // that we don't have to deal with having two pathways @@ -141,18 +136,29 @@ fn visit_lvalue(&mut self, context: LvalueContext<'tcx>, location: Location) { debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context); + let ccx = self.cx.ccx; if let mir::Lvalue::Projection(ref proj) = *lvalue { - // Allow uses of projections of immediate pair fields. + // Allow uses of projections that are ZSTs or from scalar fields. if let LvalueContext::Consume = context { - if let mir::Lvalue::Local(_) = proj.base { - if let mir::ProjectionElem::Field(..) = proj.elem { - let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx()); + let base_ty = proj.base.ty(self.cx.mir, ccx.tcx()); + let base_ty = self.cx.monomorphize(&base_ty); + + // ZSTs don't require any actual memory access. + let elem_ty = base_ty.projection_ty(ccx.tcx(), &proj.elem).to_ty(ccx.tcx()); + let elem_ty = self.cx.monomorphize(&elem_ty); + if ccx.layout_of(elem_ty).is_zst() { + return; + } - let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); - if common::type_is_imm_pair(self.cx.ccx, ty) { - return; - } + if let mir::ProjectionElem::Field(..) 
= proj.elem { + let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx())); + if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { + // Recurse as a `Consume` instead of `Projection`, + // potentially stopping at non-operand projections, + // which would trigger `mark_as_lvalue` on locals. + self.visit_lvalue(&proj.base, LvalueContext::Consume, location); + return; } } } @@ -178,9 +184,9 @@ fn visit_local(&mut self, LvalueContext::StorageLive | LvalueContext::StorageDead | LvalueContext::Validate | - LvalueContext::Inspect | LvalueContext::Consume => {} + LvalueContext::Inspect | LvalueContext::Store | LvalueContext::Borrow { .. } | LvalueContext::Projection(..) => { diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index bd26c961bb28b59558f7b85ce5e130d56f9dedca..f43eba36a8232fa8fdb188efb13e73ec93bc8320 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -11,28 +11,24 @@ use llvm::{self, ValueRef, BasicBlockRef}; use rustc::middle::lang_items; use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind}; -use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::{self, TypeFoldable}; +use rustc::ty::layout::{self, LayoutOf}; use rustc::traits; use rustc::mir; -use abi::{Abi, FnType, ArgType}; -use adt; -use base::{self, Lifetime}; +use abi::{Abi, FnType, ArgType, PassMode}; +use base; use callee; use builder::Builder; use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; -use machine::llalign_of_min; use meth; use monomorphize; -use type_of; +use type_of::LayoutLlvmExt; use type_::Type; use syntax::symbol::Symbol; use syntax_pos::Pos; -use std::cmp; - use super::{MirContext, LocalRef}; use super::constant::Const; use super::lvalue::{Alignment, LvalueRef}; @@ -120,11 +116,11 @@ fn trans_terminator(&mut self, fn_ty: FnType<'tcx>, fn_ptr: ValueRef, llargs: &[ValueRef], - destination: Option<(ReturnDest, Ty<'tcx>, mir::BasicBlock)>, + destination: Option<(ReturnDest<'tcx>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { - let ret_bcx = if let Some((_, _, target)) = destination { + let ret_bcx = if let Some((_, target)) = destination { this.blocks[target] } else { this.unreachable_block() @@ -136,14 +132,10 @@ fn trans_terminator(&mut self, cleanup_bundle); fn_ty.apply_attrs_callsite(invokeret); - if let Some((ret_dest, ret_ty, target)) = destination { + if let Some((ret_dest, target)) = destination { let ret_bcx = this.get_builder(target); this.set_debug_loc(&ret_bcx, terminator.source_info); - let op = OperandRef { - val: Immediate(invokeret), - ty: ret_ty, - }; - this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op); + this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, invokeret); } } else { let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle); @@ -156,12 +148,8 @@ fn trans_terminator(&mut self, llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); } - if let Some((ret_dest, ret_ty, target)) = destination { - let op = OperandRef { - val: Immediate(llret), - ty: ret_ty, - }; - this.store_return(&bcx, ret_dest, &fn_ty.ret, op); + if let Some((ret_dest, target)) = destination { + this.store_return(&bcx, ret_dest, &fn_ty.ret, llret); funclet_br(this, bcx, target); } else { bcx.unreachable(); @@ -175,14 +163,18 @@ fn trans_terminator(&mut self, if let Some(cleanup_pad) = cleanup_pad { bcx.cleanup_ret(cleanup_pad, None); } else { - let ps = self.get_personality_slot(&bcx); - let lp = bcx.load(ps, None); - 
Lifetime::End.call(&bcx, ps); + let slot = self.get_personality_slot(&bcx); + let lp0 = slot.project_field(&bcx, 0).load(&bcx).immediate(); + let lp1 = slot.project_field(&bcx, 1).load(&bcx).immediate(); + slot.storage_dead(&bcx); + if !bcx.sess().target.target.options.custom_unwind_resume { + let mut lp = C_undef(self.landing_pad_type()); + lp = bcx.insert_value(lp, lp0, 0); + lp = bcx.insert_value(lp, lp1, 1); bcx.resume(lp); } else { - let exc_ptr = bcx.extract_value(lp, 0); - bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], cleanup_bundle); + bcx.call(bcx.ccx.eh_unwind_resume(), &[lp0], cleanup_bundle); bcx.unreachable(); } } @@ -215,45 +207,47 @@ fn trans_terminator(&mut self, } mir::TerminatorKind::Return => { - let ret = self.fn_ty.ret; - if ret.is_ignore() || ret.is_indirect() { - bcx.ret_void(); - return; - } + let llval = match self.fn_ty.ret.mode { + PassMode::Ignore | PassMode::Indirect(_) => { + bcx.ret_void(); + return; + } - let llval = if let Some(cast_ty) = ret.cast { - let op = match self.locals[mir::RETURN_POINTER] { - LocalRef::Operand(Some(op)) => op, - LocalRef::Operand(None) => bug!("use of return before def"), - LocalRef::Lvalue(tr_lvalue) => { - OperandRef { - val: Ref(tr_lvalue.llval, tr_lvalue.alignment), - ty: tr_lvalue.ty.to_ty(bcx.tcx()) - } - } - }; - let llslot = match op.val { - Immediate(_) | Pair(..) => { - let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret", None); - self.store_operand(&bcx, llscratch, None, op); - llscratch - } - Ref(llval, align) => { - assert_eq!(align, Alignment::AbiAligned, - "return pointer is unaligned!"); - llval + PassMode::Direct(_) | PassMode::Pair(..) => { + let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); + if let Ref(llval, align) = op.val { + bcx.load(llval, align.non_abi()) + } else { + op.immediate_or_packed_pair(&bcx) } - }; - let load = bcx.load( - bcx.pointercast(llslot, cast_ty.ptr_to()), - Some(ret.layout.align(bcx.ccx).abi() as u32)); - load - } else { - let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); - if let Ref(llval, align) = op.val { - base::load_ty(&bcx, llval, align, op.ty) - } else { - op.pack_if_pair(&bcx).immediate() + } + + PassMode::Cast(cast_ty) => { + let op = match self.locals[mir::RETURN_POINTER] { + LocalRef::Operand(Some(op)) => op, + LocalRef::Operand(None) => bug!("use of return before def"), + LocalRef::Lvalue(tr_lvalue) => { + OperandRef { + val: Ref(tr_lvalue.llval, tr_lvalue.alignment), + layout: tr_lvalue.layout + } + } + }; + let llslot = match op.val { + Immediate(_) | Pair(..) => { + let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret"); + op.val.store(&bcx, scratch); + scratch.llval + } + Ref(llval, align) => { + assert_eq!(align, Alignment::AbiAligned, + "return pointer is unaligned!"); + llval + } + }; + bcx.load( + bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()), + Some(self.fn_ty.ret.layout.align)) } }; bcx.ret(llval); @@ -275,15 +269,24 @@ fn trans_terminator(&mut self, } let lvalue = self.trans_lvalue(&bcx, location); - let fn_ty = FnType::of_instance(bcx.ccx, &drop_fn); - let (drop_fn, need_extra) = match ty.sty { - ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra), - false), - _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra()) + let mut args: &[_] = &[lvalue.llval, lvalue.llextra]; + args = &args[..1 + lvalue.has_extra() as usize]; + let (drop_fn, fn_ty) = match ty.sty { + ty::TyDynamic(..) 
=> { + let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn); + let sig = common::ty_fn_sig(bcx.ccx, fn_ty); + let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig); + let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]); + args = &args[..1]; + (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty) + } + _ => { + (callee::get_fn(bcx.ccx, drop_fn), + FnType::of_instance(bcx.ccx, &drop_fn)) + } }; - let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize]; do_call(self, bcx, fn_ty, drop_fn, args, - Some((ReturnDest::Nothing, tcx.mk_nil(), target)), + Some((ReturnDest::Nothing, target)), unwind); } @@ -336,6 +339,9 @@ fn trans_terminator(&mut self, let filename = C_str_slice(bcx.ccx, filename); let line = C_u32(bcx.ccx, loc.line as u32); let col = C_u32(bcx.ccx, loc.col.to_usize() as u32 + 1); + let align = tcx.data_layout.aggregate_align + .max(tcx.data_layout.i32_align) + .max(tcx.data_layout.pointer_align); // Put together the arguments to the panic entry point. let (lang_item, args, const_err) = match *msg { @@ -351,7 +357,6 @@ fn trans_terminator(&mut self, })); let file_line_col = C_struct(bcx.ccx, &[filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(file_line_col)); let file_line_col = consts::addr_of(bcx.ccx, file_line_col, align, @@ -366,7 +371,6 @@ fn trans_terminator(&mut self, let msg_file_line_col = C_struct(bcx.ccx, &[msg_str, filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col)); let msg_file_line_col = consts::addr_of(bcx.ccx, msg_file_line_col, align, @@ -387,7 +391,6 @@ fn trans_terminator(&mut self, let msg_file_line_col = C_struct(bcx.ccx, &[msg_str, filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col)); let msg_file_line_col = consts::addr_of(bcx.ccx, msg_file_line_col, align, @@ -428,7 +431,7 @@ fn trans_terminator(&mut self, // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. let callee = self.trans_operand(&bcx, func); - let (instance, mut llfn) = match callee.ty.sty { + let (instance, mut llfn) = match callee.layout.ty.sty { ty::TyFnDef(def_id, substs) => { (Some(ty::Instance::resolve(bcx.ccx.tcx(), ty::ParamEnv::empty(traits::Reveal::All), @@ -439,10 +442,10 @@ fn trans_terminator(&mut self, ty::TyFnPtr(_) => { (None, Some(callee.immediate())) } - _ => bug!("{} is not callable", callee.ty) + _ => bug!("{} is not callable", callee.layout.ty) }; let def = instance.map(|i| i.def); - let sig = callee.ty.fn_sig(bcx.tcx()); + let sig = callee.layout.ty.fn_sig(bcx.tcx()); let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig); let abi = sig.abi; @@ -493,83 +496,51 @@ fn trans_terminator(&mut self, ReturnDest::Nothing }; - // Split the rust-call tupled arguments off. - let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { - let (tup, args) = args.split_last().unwrap(); - (args, Some(tup)) - } else { - (&args[..], None) - }; - - let is_shuffle = intrinsic.map_or(false, |name| { - name.starts_with("simd_shuffle") - }); - let mut idx = 0; - for arg in first_args { - // The indices passed to simd_shuffle* in the - // third argument must be constant. This is - // checked by const-qualification, which also - // promotes any complex rvalues to constants. 
- if is_shuffle && idx == 2 { - match *arg { - mir::Operand::Consume(_) => { - span_bug!(span, "shuffle indices must be constant"); - } - mir::Operand::Constant(ref constant) => { - let val = self.trans_constant(&bcx, constant); - llargs.push(val.llval); - idx += 1; - continue; - } - } - } - - let mut op = self.trans_operand(&bcx, arg); - - // The callee needs to own the argument memory if we pass it - // by-ref, so make a local copy of non-immediate constants. - if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) { - let tmp = LvalueRef::alloca(&bcx, op.ty, "const"); - self.store_operand(&bcx, tmp.llval, tmp.alignment.to_align(), op); - op.val = Ref(tmp.llval, tmp.alignment); - } - - self.trans_argument(&bcx, op, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def); - } - if let Some(tup) = untuple { - self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty, - &mut idx, &mut llfn, &def) - } - if intrinsic.is_some() && intrinsic != Some("drop_in_place") { use intrinsic::trans_intrinsic_call; - let (dest, llargs) = match ret_dest { - _ if fn_ty.ret.is_indirect() => { - (llargs[0], &llargs[1..]) - } + let dest = match ret_dest { + _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..]) + C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()) } ReturnDest::IndirectOperand(dst, _) | - ReturnDest::Store(dst) => (dst, &llargs[..]), + ReturnDest::Store(dst) => dst.llval, ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call") }; + let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| { + // The indices passed to simd_shuffle* in the + // third argument must be constant. This is + // checked by const-qualification, which also + // promotes any complex rvalues to constants. + if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") { + match *arg { + mir::Operand::Consume(_) => { + span_bug!(span, "shuffle indices must be constant"); + } + mir::Operand::Constant(ref constant) => { + let val = self.trans_constant(&bcx, constant); + return OperandRef { + val: Immediate(val.llval), + layout: bcx.ccx.layout_of(val.ty) + }; + } + } + } + + self.trans_operand(&bcx, arg) + }).collect(); + + let callee_ty = common::instance_ty( bcx.ccx.tcx(), instance.as_ref().unwrap()); - trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &llargs, dest, + trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &args, dest, terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - // Make a fake operand for store_return - let op = OperandRef { - val: Ref(dst, Alignment::AbiAligned), - ty: sig.output(), - }; - self.store_return(&bcx, ret_dest, &fn_ty.ret, op); + self.store_return(&bcx, ret_dest, &fn_ty.ret, dst.llval); } if let Some((_, target)) = *destination { @@ -581,6 +552,40 @@ fn trans_terminator(&mut self, return; } + // Split the rust-call tupled arguments off. 
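+ // The split-off tuple is flattened into individual ABI-level arguments by trans_arguments_untupled further down.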
+ let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { + let (tup, args) = args.split_last().unwrap(); + (args, Some(tup)) + } else { + (&args[..], None) + }; + + for (i, arg) in first_args.iter().enumerate() { + let mut op = self.trans_operand(&bcx, arg); + if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { + if let Pair(data_ptr, meta) = op.val { + llfn = Some(meth::VirtualIndex::from_index(idx) + .get_fn(&bcx, meta, &fn_ty)); + llargs.push(data_ptr); + continue; + } + } + + // The callee needs to own the argument memory if we pass it + // by-ref, so make a local copy of non-immediate constants. + if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) { + let tmp = LvalueRef::alloca(&bcx, op.layout, "const"); + op.val.store(&bcx, tmp); + op.val = Ref(tmp.llval, tmp.alignment); + } + + self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]); + } + if let Some(tup) = untuple { + self.trans_arguments_untupled(&bcx, tup, &mut llargs, + &fn_ty.args[first_args.len()..]) + } + let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, (None, Some(instance)) => callee::get_fn(bcx.ccx, instance), @@ -588,7 +593,7 @@ fn trans_terminator(&mut self, }; do_call(self, bcx, fn_ty, fn_ptr, &llargs, - destination.as_ref().map(|&(_, target)| (ret_dest, sig.output(), target)), + destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } mir::TerminatorKind::GeneratorDrop | @@ -601,79 +606,73 @@ fn trans_argument(&mut self, bcx: &Builder<'a, 'tcx>, op: OperandRef<'tcx>, llargs: &mut Vec, - fn_ty: &FnType<'tcx>, - next_idx: &mut usize, - llfn: &mut Option, - def: &Option>) { - if let Pair(a, b) = op.val { - // Treat the values in a fat pointer separately. - if common::type_is_fat_ptr(bcx.ccx, op.ty) { - let (ptr, meta) = (a, b); - if *next_idx == 0 { - if let Some(ty::InstanceDef::Virtual(_, idx)) = *def { - let llmeth = meth::VirtualIndex::from_index(idx).get_fn(bcx, meta); - let llty = fn_ty.llvm_type(bcx.ccx).ptr_to(); - *llfn = Some(bcx.pointercast(llmeth, llty)); - } - } - - let imm_op = |x| OperandRef { - val: Immediate(x), - // We won't be checking the type again. - ty: bcx.tcx().types.err - }; - self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, llfn, def); - self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, llfn, def); - return; - } - } - - let arg = &fn_ty.args[*next_idx]; - *next_idx += 1; - + arg: &ArgType<'tcx>) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(C_undef(ty)); + llargs.push(C_undef(ty.llvm_type(bcx.ccx))); } if arg.is_ignore() { return; } + if let PassMode::Pair(..) = arg.mode { + match op.val { + Pair(a, b) => { + llargs.push(a); + llargs.push(b); + return; + } + _ => bug!("trans_argument: {:?} invalid for pair arugment", op) + } + } + // Force by-ref if we have to load through a cast pointer. let (mut llval, align, by_ref) = match op.val { Immediate(_) | Pair(..) 
=> { - if arg.is_indirect() || arg.cast.is_some() { - let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None); - self.store_operand(bcx, llscratch, None, op); - (llscratch, Alignment::AbiAligned, true) - } else { - (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) + match arg.mode { + PassMode::Indirect(_) | PassMode::Cast(_) => { + let scratch = LvalueRef::alloca(bcx, arg.layout, "arg"); + op.val.store(bcx, scratch); + (scratch.llval, Alignment::AbiAligned, true) + } + _ => { + (op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false) + } } } - Ref(llval, Alignment::Packed) if arg.is_indirect() => { + Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => { // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. - let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None); - base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1)); - (llscratch, Alignment::AbiAligned, true) + let scratch = LvalueRef::alloca(bcx, arg.layout, "arg"); + base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align.non_abi()); + (scratch.llval, Alignment::AbiAligned, true) } Ref(llval, align) => (llval, align, true) }; if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. - if arg.layout.ty == bcx.tcx().types.bool { - // We store bools as i8 so we need to truncate to i1. - llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); - llval = bcx.trunc(llval, Type::i1(bcx.ccx)); - } else if let Some(ty) = arg.cast { - llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()), - align.min_with(arg.layout.align(bcx.ccx).abi() as u32)); + if let PassMode::Cast(ty) = arg.mode { + llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), + (align | Alignment::Packed(arg.layout.align)) + .non_abi()); } else { - llval = bcx.load(llval, align.to_align()); + // We can't use `LvalueRef::load` here because the argument + // may have a type we don't treat as immediate, but the ABI + // used for this call is passing it by-value. In that case, + // the load would just produce `OperandValue::Ref` instead + // of the `OperandValue::Immediate` we need for the call. + llval = bcx.load(llval, align.non_abi()); + if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { + if scalar.is_bool() { + bcx.range_metadata(llval, 0..2); + } + } + // We store bools as i8 so we need to truncate to i1. + llval = base::to_immediate(bcx, llval, arg.layout); } } @@ -684,89 +683,36 @@ fn trans_arguments_untupled(&mut self, bcx: &Builder<'a, 'tcx>, operand: &mir::Operand<'tcx>, llargs: &mut Vec, - fn_ty: &FnType<'tcx>, - next_idx: &mut usize, - llfn: &mut Option, - def: &Option>) { + args: &[ArgType<'tcx>]) { let tuple = self.trans_operand(bcx, operand); - let arg_types = match tuple.ty.sty { - ty::TyTuple(ref tys, _) => tys, - _ => span_bug!(self.mir.span, - "bad final argument to \"rust-call\" fn {:?}", tuple.ty) - }; - // Handle both by-ref and immediate tuples. 
- match tuple.val { - Ref(llval, align) => { - for (n, &ty) in arg_types.iter().enumerate() { - let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align); - let (ptr, align) = ptr.trans_field_ptr(bcx, n); - let val = if common::type_is_fat_ptr(bcx.ccx, ty) { - let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty); - Pair(lldata, llextra) - } else { - // trans_argument will load this if it needs to - Ref(ptr, align) - }; - let op = OperandRef { - val, - ty, - }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); - } - - } - Immediate(llval) => { - let l = bcx.ccx.layout_of(tuple.ty); - let v = if let layout::Univariant { ref variant, .. } = *l { - variant - } else { - bug!("Not a tuple."); - }; - for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = bcx.extract_value( - llval, adt::struct_llfields_index(v, n)); - // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx)); - } - // If the tuple is immediate, the elements are as well - let op = OperandRef { - val: Immediate(elem), - ty, - }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); - } + if let Ref(llval, align) = tuple.val { + let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align); + for i in 0..tuple.layout.fields.count() { + let field_ptr = tuple_ptr.project_field(bcx, i); + self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]); } - Pair(a, b) => { - let elems = [a, b]; - for (n, &ty) in arg_types.iter().enumerate() { - let mut elem = elems[n]; - // Truncate bools to i1, if needed - if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) { - elem = bcx.trunc(elem, Type::i1(bcx.ccx)); - } - // Pair is always made up of immediates - let op = OperandRef { - val: Immediate(elem), - ty, - }; - self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def); - } + } else { + // If the tuple is immediate, the elements are as well. 
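+ // Each element is pulled out of the immediate (or pair) value with extract_field and passed on as its own argument.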
+ for i in 0..tuple.layout.fields.count() { + let op = tuple.extract_field(bcx, i); + self.trans_argument(bcx, op, llargs, &args[i]); } } - } - fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { + fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> LvalueRef<'tcx> { let ccx = bcx.ccx; - if let Some(slot) = self.llpersonalityslot { + if let Some(slot) = self.personality_slot { slot } else { - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = bcx.alloca(llretty, "personalityslot", None); - self.llpersonalityslot = Some(slot); + let layout = ccx.layout_of(ccx.tcx().intern_tup(&[ + ccx.tcx().mk_mut_ptr(ccx.tcx().types.u8), + ccx.tcx().types.i32 + ], false)); + let slot = LvalueRef::alloca(bcx, layout, "personalityslot"); + self.personality_slot = Some(slot); slot } } @@ -792,18 +738,24 @@ fn landing_pad_uncached(&mut self, target_bb: BasicBlockRef) -> BasicBlockRef { let bcx = self.new_block("cleanup"); - let ccx = bcx.ccx; let llpersonality = self.ccx.eh_personality(); - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); - bcx.set_cleanup(llretval); + let llretty = self.landing_pad_type(); + let lp = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); + bcx.set_cleanup(lp); + let slot = self.get_personality_slot(&bcx); - Lifetime::Start.call(&bcx, slot); - bcx.store(llretval, slot, None); + slot.storage_live(&bcx); + Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)).store(&bcx, slot); + bcx.br(target_bb); bcx.llbb() } + fn landing_pad_type(&self) -> Type { + let ccx = self.ccx; + Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false) + } + fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { let bl = self.new_block("unreachable"); @@ -824,31 +776,33 @@ pub fn get_builder(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> { } fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, - dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, - llargs: &mut Vec, is_intrinsic: bool) -> ReturnDest { + dest: &mir::Lvalue<'tcx>, fn_ret: &ArgType<'tcx>, + llargs: &mut Vec, is_intrinsic: bool) + -> ReturnDest<'tcx> { // If the return is ignored, we can just return a do-nothing ReturnDest - if fn_ret_ty.is_ignore() { + if fn_ret.is_ignore() { return ReturnDest::Nothing; } let dest = if let mir::Lvalue::Local(index) = *dest { - let ret_ty = self.monomorphized_lvalue_ty(dest); match self.locals[index] { LocalRef::Lvalue(dest) => dest, LocalRef::Operand(None) => { // Handle temporary lvalues, specifically Operand ones, as // they don't have allocas - return if fn_ret_ty.is_indirect() { + return if fn_ret.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. - let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); + let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret"); + tmp.storage_live(bcx); llargs.push(tmp.llval); - ReturnDest::IndirectOperand(tmp.llval, index) + ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result. 
so we create a temporary alloca for the // result - let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); - ReturnDest::IndirectOperand(tmp.llval, index) + let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret"); + tmp.storage_live(bcx); + ReturnDest::IndirectOperand(tmp, index) } else { ReturnDest::DirectOperand(index) }; @@ -860,13 +814,13 @@ fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, } else { self.trans_lvalue(bcx, dest) }; - if fn_ret_ty.is_indirect() { + if fn_ret.is_indirect() { match dest.alignment { Alignment::AbiAligned => { llargs.push(dest.llval); ReturnDest::Nothing }, - Alignment::Packed => { + Alignment::Packed(_) => { // Currently, MIR code generation does not create calls // that store directly to fields of packed structs (in // fact, the calls it creates write only to temps), @@ -877,7 +831,7 @@ fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, } } } else { - ReturnDest::Store(dest.llval) + ReturnDest::Store(dest) } } @@ -886,63 +840,67 @@ fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>, dst: &mir::Lvalue<'tcx>) { if let mir::Lvalue::Local(index) = *dst { match self.locals[index] { - LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, &lvalue), + LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, lvalue), LocalRef::Operand(None) => { - let lvalue_ty = self.monomorphized_lvalue_ty(dst); - assert!(!lvalue_ty.has_erasable_regions()); - let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp"); - self.trans_transmute_into(bcx, src, &lvalue); - let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty); + let dst_layout = bcx.ccx.layout_of(self.monomorphized_lvalue_ty(dst)); + assert!(!dst_layout.ty.has_erasable_regions()); + let lvalue = LvalueRef::alloca(bcx, dst_layout, "transmute_temp"); + lvalue.storage_live(bcx); + self.trans_transmute_into(bcx, src, lvalue); + let op = lvalue.load(bcx); + lvalue.storage_dead(bcx); self.locals[index] = LocalRef::Operand(Some(op)); } - LocalRef::Operand(Some(_)) => { - let ty = self.monomorphized_lvalue_ty(dst); - assert!(common::type_is_zero_size(bcx.ccx, ty), + LocalRef::Operand(Some(op)) => { + assert!(op.layout.is_zst(), "assigning to initialized SSAtemp"); } } } else { let dst = self.trans_lvalue(bcx, dst); - self.trans_transmute_into(bcx, src, &dst); + self.trans_transmute_into(bcx, src, dst); } } fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>, src: &mir::Operand<'tcx>, - dst: &LvalueRef<'tcx>) { - let val = self.trans_operand(bcx, src); - let llty = type_of::type_of(bcx.ccx, val.ty); + dst: LvalueRef<'tcx>) { + let src = self.trans_operand(bcx, src); + let llty = src.layout.llvm_type(bcx.ccx); let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to()); - let in_type = val.ty; - let out_type = dst.ty.to_ty(bcx.tcx()); - let llalign = cmp::min(bcx.ccx.align_of(in_type), bcx.ccx.align_of(out_type)); - self.store_operand(bcx, cast_ptr, Some(llalign), val); + let align = src.layout.align.min(dst.layout.align); + src.val.store(bcx, + LvalueRef::new_sized(cast_ptr, src.layout, Alignment::Packed(align))); } // Stores the return value of a function call into it's final location. 
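+ // (With this refactor the value arrives as a raw ValueRef rather than an OperandRef; see the new `llval` parameter below.)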
fn store_return(&mut self, bcx: &Builder<'a, 'tcx>, - dest: ReturnDest, + dest: ReturnDest<'tcx>, ret_ty: &ArgType<'tcx>, - op: OperandRef<'tcx>) { + llval: ValueRef) { use self::ReturnDest::*; match dest { Nothing => (), - Store(dst) => ret_ty.store(bcx, op.immediate(), dst), + Store(dst) => ret_ty.store(bcx, llval, dst), IndirectOperand(tmp, index) => { - let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty); + let op = tmp.load(bcx); + tmp.storage_dead(bcx); self.locals[index] = LocalRef::Operand(Some(op)); } DirectOperand(index) => { // If there is a cast, we have to store and reload. - let op = if ret_ty.cast.is_some() { - let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret"); - ret_ty.store(bcx, op.immediate(), tmp.llval); - self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty) + let op = if let PassMode::Cast(_) = ret_ty.mode { + let tmp = LvalueRef::alloca(bcx, ret_ty.layout, "tmp_ret"); + tmp.storage_live(bcx); + ret_ty.store(bcx, llval, tmp); + let op = tmp.load(bcx); + tmp.storage_dead(bcx); + op } else { - op.unpack_if_pair(bcx) + OperandRef::from_immediate_or_packed_pair(bcx, llval, ret_ty.layout) }; self.locals[index] = LocalRef::Operand(Some(op)); } @@ -950,13 +908,13 @@ fn store_return(&mut self, } } -enum ReturnDest { +enum ReturnDest<'tcx> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer - Store(ValueRef), + Store(LvalueRef<'tcx>), // Stores an indirect return value to an operand local lvalue - IndirectOperand(ValueRef, mir::Local), + IndirectOperand(LvalueRef<'tcx>, mir::Local), // Stores a direct return value to an operand local lvalue DirectOperand(mir::Local) } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 6573e507bd32554e26acd6398a9b7cde2d8c2a24..8c013330e5bcb506dd792ee1103612da777f2adc 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,21 +18,21 @@ use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, LayoutOf, Size}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::{Kind, Substs, Subst}; use rustc_apfloat::{ieee, Float, Status}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use {adt, base, machine}; +use base; use abi::{self, Abi}; use callee; use builder::Builder; use common::{self, CrateContext, const_get_elt, val_ty}; -use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64}; -use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, is_undef}; +use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_uint_big, C_u32, C_u64}; +use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr}; use common::const_to_opt_u128; use consts; -use type_of; +use type_of::LayoutLlvmExt; use type_::Type; use value::Value; @@ -55,7 +55,7 @@ pub struct Const<'tcx> { pub ty: Ty<'tcx> } -impl<'tcx> Const<'tcx> { +impl<'a, 'tcx> Const<'tcx> { pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> { Const { llval, @@ -63,32 +63,31 @@ pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> { } } - pub fn from_constint<'a>(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt) - -> Const<'tcx> { + pub fn from_constint(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt) -> Const<'tcx> { let tcx = ccx.tcx(); let (llval, ty) = match *ci { I8(v) => (C_int(Type::i8(ccx), v as i64), tcx.types.i8), I16(v) => (C_int(Type::i16(ccx), v as i64), 
tcx.types.i16), I32(v) => (C_int(Type::i32(ccx), v as i64), tcx.types.i32), I64(v) => (C_int(Type::i64(ccx), v as i64), tcx.types.i64), - I128(v) => (C_big_integral(Type::i128(ccx), v as u128), tcx.types.i128), + I128(v) => (C_uint_big(Type::i128(ccx), v as u128), tcx.types.i128), Isize(v) => (C_int(Type::isize(ccx), v.as_i64()), tcx.types.isize), U8(v) => (C_uint(Type::i8(ccx), v as u64), tcx.types.u8), U16(v) => (C_uint(Type::i16(ccx), v as u64), tcx.types.u16), U32(v) => (C_uint(Type::i32(ccx), v as u64), tcx.types.u32), U64(v) => (C_uint(Type::i64(ccx), v), tcx.types.u64), - U128(v) => (C_big_integral(Type::i128(ccx), v), tcx.types.u128), + U128(v) => (C_uint_big(Type::i128(ccx), v), tcx.types.u128), Usize(v) => (C_uint(Type::isize(ccx), v.as_u64()), tcx.types.usize), }; Const { llval: llval, ty: ty } } /// Translate ConstVal into a LLVM constant value. - pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>, - cv: &ConstVal, - ty: Ty<'tcx>) - -> Const<'tcx> { - let llty = type_of::type_of(ccx, ty); + pub fn from_constval(ccx: &CrateContext<'a, 'tcx>, + cv: &ConstVal, + ty: Ty<'tcx>) + -> Const<'tcx> { + let llty = ccx.layout_of(ty).llvm_type(ccx); let val = match *cv { ConstVal::Float(v) => { let bits = match v.ty { @@ -100,9 +99,11 @@ pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>, ConstVal::Bool(v) => C_bool(ccx, v), ConstVal::Integral(ref i) => return Const::from_constint(ccx, i), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), - ConstVal::ByteStr(v) => consts::addr_of(ccx, C_bytes(ccx, v.data), 1, "byte_str"), + ConstVal::ByteStr(v) => { + consts::addr_of(ccx, C_bytes(ccx, v.data), ccx.align_of(ty), "byte_str") + } ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64), - ConstVal::Function(..) => C_null(type_of::type_of(ccx, ty)), + ConstVal::Function(..) => C_undef(llty), ConstVal::Variant(_) | ConstVal::Aggregate(..) | ConstVal::Unevaluated(..) 
=> { @@ -115,15 +116,44 @@ pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>, Const::new(val, ty) } - fn get_pair(&self) -> (ValueRef, ValueRef) { - (const_get_elt(self.llval, &[0]), - const_get_elt(self.llval, &[1])) + fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef { + let layout = ccx.layout_of(self.ty); + let field = layout.field(ccx, i); + if field.is_zst() { + return C_undef(field.immediate_llvm_type(ccx)); + } + match layout.abi { + layout::Abi::Scalar(_) => self.llval, + layout::Abi::ScalarPair(ref a, ref b) => { + let offset = layout.fields.offset(i); + if offset.bytes() == 0 { + if field.size == layout.size { + self.llval + } else { + assert_eq!(field.size, a.value.size(ccx)); + const_get_elt(self.llval, 0) + } + } else { + assert_eq!(offset, a.value.size(ccx) + .abi_align(b.value.align(ccx))); + assert_eq!(field.size, b.value.size(ccx)); + const_get_elt(self.llval, 1) + } + } + _ => { + const_get_elt(self.llval, layout.llvm_field_index(i)) + } + } + } + + fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) { + (self.get_field(ccx, 0), self.get_field(ccx, 1)) } - fn get_fat_ptr(&self) -> (ValueRef, ValueRef) { + fn get_fat_ptr(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) { assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); - self.get_pair() + self.get_pair(ccx) } fn as_lvalue(&self) -> ConstLvalue<'tcx> { @@ -134,14 +164,16 @@ fn as_lvalue(&self) -> ConstLvalue<'tcx> { } } - pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { - let llty = type_of::immediate_type_of(ccx, self.ty); + pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { + let layout = ccx.layout_of(self.ty); + let llty = layout.immediate_llvm_type(ccx); let llvalty = val_ty(self.llval); - let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { - let (a, b) = self.get_pair(); - OperandValue::Pair(a, b) - } else if llty == llvalty && common::type_is_immediate(ccx, self.ty) { + let val = if llty == llvalty && layout.is_llvm_scalar_pair() { + OperandValue::Pair( + const_get_elt(self.llval, 0), + const_get_elt(self.llval, 1)) + } else if llty == llvalty && layout.is_llvm_immediate() { // If the types match, we can use the value directly. OperandValue::Immediate(self.llval) } else { @@ -149,12 +181,13 @@ pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { // a constant LLVM global and cast its address if necessary. let align = ccx.align_of(self.ty); let ptr = consts::addr_of(ccx, self.llval, align, "const"); - OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned) + OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(ccx).ptr_to()), + Alignment::AbiAligned) }; OperandRef { val, - ty: self.ty + layout: ccx.layout_of(self.ty) } } } @@ -368,12 +401,12 @@ fn trans(&mut self) -> Result, ConstEvalErr<'tcx>> { match &tcx.item_name(def_id)[..] 
{ "size_of" => { let llval = C_usize(self.ccx, - self.ccx.size_of(substs.type_at(0))); + self.ccx.size_of(substs.type_at(0)).bytes()); Ok(Const::new(llval, tcx.types.usize)) } "min_align_of" => { let llval = C_usize(self.ccx, - self.ccx.align_of(substs.type_at(0)) as u64); + self.ccx.align_of(substs.type_at(0)).abi()); Ok(Const::new(llval, tcx.types.usize)) } _ => span_bug!(span, "{:?} in constant", terminator.kind) @@ -436,7 +469,7 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) let (base, extra) = if !has_metadata { (base.llval, ptr::null_mut()) } else { - base.get_fat_ptr() + base.get_fat_ptr(self.ccx) }; if self.ccx.statics().borrow().contains_key(&base) { (Base::Static(base), extra) @@ -450,9 +483,10 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) span_bug!(span, "dereference of non-constant pointer `{:?}`", Value(base)); } - if projected_ty.is_bool() { + let layout = self.ccx.layout_of(projected_ty); + if let layout::Abi::Scalar(ref scalar) = layout.abi { let i1_type = Type::i1(self.ccx); - if val_ty(val) != i1_type { + if scalar.is_bool() && val_ty(val) != i1_type { unsafe { val = llvm::LLVMConstTrunc(val, i1_type.to_ref()); } @@ -462,8 +496,7 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) } } mir::ProjectionElem::Field(ref field, _) => { - let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval, - field.index()); + let llprojected = base.get_field(self.ccx, field.index()); let llextra = if !has_metadata { ptr::null_mut() } else { @@ -484,9 +517,9 @@ fn const_lvalue(&self, lvalue: &mir::Lvalue<'tcx>, span: Span) // Produce an undef instead of a LLVM assertion on OOB. let len = common::const_to_uint(tr_base.len(self.ccx)); let llelem = if iv < len as u128 { - const_get_elt(base.llval, &[iv as u32]) + const_get_elt(base.llval, iv as u64) } else { - C_undef(type_of::type_of(self.ccx, projected_ty)) + C_undef(self.ccx.layout_of(projected_ty).llvm_type(self.ccx)) }; (Base::Value(llelem), ptr::null_mut()) @@ -540,7 +573,7 @@ fn const_array(&self, array_ty: Ty<'tcx>, fields: &[ValueRef]) let elem_ty = array_ty.builtin_index().unwrap_or_else(|| { bug!("bad array type {:?}", array_ty) }); - let llunitty = type_of::type_of(self.ccx, elem_ty); + let llunitty = self.ccx.layout_of(elem_ty).llvm_type(self.ccx); // If the array contains enums, an LLVM array won't work. let val = if fields.iter().all(|&f| val_ty(f) == llunitty) { C_array(llunitty, fields) @@ -566,7 +599,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, self.const_array(dest_ty, &fields) } - mir::Rvalue::Aggregate(ref kind, ref operands) => { + mir::Rvalue::Aggregate(box mir::AggregateKind::Array(_), ref operands) => { // Make sure to evaluate all operands to // report as many errors as we possibly can. let mut fields = Vec::with_capacity(operands.len()); @@ -579,17 +612,23 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, } failure?; - match **kind { - mir::AggregateKind::Array(_) => { - self.const_array(dest_ty, &fields) - } - mir::AggregateKind::Adt(..) | - mir::AggregateKind::Closure(..) | - mir::AggregateKind::Generator(..) | - mir::AggregateKind::Tuple => { - Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty) + self.const_array(dest_ty, &fields) + } + + mir::Rvalue::Aggregate(ref kind, ref operands) => { + // Make sure to evaluate all operands to + // report as many errors as we possibly can. 
+ let mut fields = Vec::with_capacity(operands.len()); + let mut failure = Ok(()); + for operand in operands { + match self.const_operand(operand, span) { + Ok(val) => fields.push(val), + Err(err) => if failure.is_ok() { failure = Err(err); } } } + failure?; + + trans_const_adt(self.ccx, dest_ty, kind, &fields) } mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { @@ -635,10 +674,6 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, operand.llval } mir::CastKind::Unsize => { - // unsize targets other than to a fat pointer currently - // can't be in constants. - assert!(common::type_is_fat_ptr(self.ccx, cast_ty)); - let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference) .expect("consts: unsizing got non-pointer type").ty; let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) { @@ -648,7 +683,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, // to use a different vtable. In that case, we want to // load out the original data pointer so we can repackage // it. - let (base, extra) = operand.get_fat_ptr(); + let (base, extra) = operand.get_fat_ptr(self.ccx); (base, Some(extra)) } else { (operand.llval, None) @@ -656,7 +691,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference) .expect("consts: unsizing got non-pointer target type").ty; - let ptr_ty = type_of::in_memory_type_of(self.ccx, unsized_ty).ptr_to(); + let ptr_ty = self.ccx.layout_of(unsized_ty).llvm_type(self.ccx).ptr_to(); let base = consts::ptrcast(base, ptr_ty); let info = base::unsized_info(self.ccx, pointee_ty, unsized_ty, old_info); @@ -666,22 +701,23 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, .insert(base, operand.llval); assert!(prev_const.is_none() || prev_const == Some(operand.llval)); } - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(self.ccx, &[base, info], false) + C_fat_ptr(self.ccx, base, info) } - mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => { - debug_assert!(common::type_is_immediate(self.ccx, cast_ty)); + mir::CastKind::Misc if self.ccx.layout_of(operand.ty).is_llvm_immediate() => { let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_out = type_of::immediate_type_of(self.ccx, cast_ty); + let cast_layout = self.ccx.layout_of(cast_ty); + assert!(cast_layout.is_llvm_immediate()); + let ll_t_out = cast_layout.immediate_llvm_type(self.ccx); let llval = operand.llval; - let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in { - let l = self.ccx.layout_of(operand.ty); - adt::is_discr_signed(&l) - } else { - operand.ty.is_signed() - }; + + let mut signed = false; + let l = self.ccx.layout_of(operand.ty); + if let layout::Abi::Scalar(ref scalar) = l.abi { + if let layout::Int(_, true) = scalar.value { + signed = true; + } + } unsafe { match (r_t_in, r_t_out) { @@ -720,20 +756,19 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, } } mir::CastKind::Misc => { // Casts from a fat-ptr. 
- let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty); - let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty); - if common::type_is_fat_ptr(self.ccx, operand.ty) { - let (data_ptr, meta_ptr) = operand.get_fat_ptr(); - if common::type_is_fat_ptr(self.ccx, cast_ty) { - let ll_cft = ll_cast_ty.field_types(); - let ll_fft = ll_from_ty.field_types(); - let data_cast = consts::ptrcast(data_ptr, ll_cft[0]); - assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); - C_struct(self.ccx, &[data_cast, meta_ptr], false) + let l = self.ccx.layout_of(operand.ty); + let cast = self.ccx.layout_of(cast_ty); + if l.is_llvm_scalar_pair() { + let (data_ptr, meta) = operand.get_fat_ptr(self.ccx); + if cast.is_llvm_scalar_pair() { + let data_cast = consts::ptrcast(data_ptr, + cast.scalar_pair_element_llvm_type(self.ccx, 0)); + C_fat_ptr(self.ccx, data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - consts::ptrcast(data_ptr, ll_cast_ty) + let llcast_ty = cast.immediate_llvm_type(self.ccx); + consts::ptrcast(data_ptr, llcast_ty) } } else { bug!("Unexpected non-fat-pointer operand") @@ -756,7 +791,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, let align = if self.ccx.shared().type_is_sized(ty) { self.ccx.align_of(ty) } else { - self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign + self.ccx.tcx().data_layout.pointer_align }; if bk == mir::BorrowKind::Mut { consts::addr_of_mut(self.ccx, llval, align, "ref_mut") @@ -771,7 +806,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, let ptr = if self.ccx.shared().type_is_sized(ty) { base } else { - C_struct(self.ccx, &[base, tr_lvalue.llextra], false) + C_fat_ptr(self.ccx, base, tr_lvalue.llextra) }; Const::new(ptr, ref_ty) } @@ -801,8 +836,10 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) { Some((llval, of)) => { - let llof = C_bool(self.ccx, of); - Const::new(C_struct(self.ccx, &[llval, llof], false), binop_ty) + trans_const_adt(self.ccx, binop_ty, &mir::AggregateKind::Tuple, &[ + Const::new(llval, val_ty), + Const::new(C_bool(self.ccx, of), tcx.types.bool) + ]) } None => { span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}", @@ -836,7 +873,7 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(self.ccx.shared().type_is_sized(ty)); - let llval = C_usize(self.ccx, self.ccx.size_of(ty)); + let llval = C_usize(self.ccx, self.ccx.size_of(ty).bytes()); Const::new(llval, tcx.types.usize) } @@ -986,7 +1023,7 @@ unsafe fn cast_const_float_to_int(ccx: &CrateContext, let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast }; err.report(ccx.tcx(), span, "expression"); } - C_big_integral(int_ty, cast_result.value) + C_uint_big(int_ty, cast_result.value) } unsafe fn cast_const_int_to_float(ccx: &CrateContext, @@ -1037,7 +1074,7 @@ pub fn trans_constant(&mut self, let result = result.unwrap_or_else(|_| { // We've errored, so we don't have to produce working code. - let llty = type_of::type_of(bcx.ccx, ty); + let llty = bcx.ccx.layout_of(ty).llvm_type(bcx.ccx); Const::new(C_undef(llty), ty) }); @@ -1075,19 +1112,41 @@ pub fn trans_static_initializer<'a, 'tcx>( /// Currently the returned value has the same size as the type, but /// this could be changed in the future to avoid allocating unnecessary /// space after values of shorter-than-maximum cases. 
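+/// With the new layout model this also covers niche-filling enums, where the
+/// discriminant is encoded in otherwise-invalid values of a field (see the
+/// `Variants::NicheFilling` arm below).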
-fn trans_const<'a, 'tcx>( +fn trans_const_adt<'a, 'tcx>( ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, kind: &mir::AggregateKind, - vals: &[ValueRef] -) -> ValueRef { + vals: &[Const<'tcx>] +) -> Const<'tcx> { let l = ccx.layout_of(t); let variant_index = match *kind { mir::AggregateKind::Adt(_, index, _, _) => index, _ => 0, }; - match *l { - layout::CEnum { discr: d, min, max, .. } => { + + if let layout::Abi::Uninhabited = l.abi { + return Const::new(C_undef(l.llvm_type(ccx)), t); + } + + match l.variants { + layout::Variants::Single { index } => { + assert_eq!(variant_index, index); + if let layout::Abi::Vector = l.abi { + Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::>()), t) + } else if let layout::FieldPlacement::Union(_) = l.fields { + assert_eq!(variant_index, 0); + assert_eq!(vals.len(), 1); + let contents = [ + vals[0].llval, + padding(ccx, l.size - ccx.size_of(vals[0].ty)) + ]; + + Const::new(C_struct(ccx, &contents, l.is_packed()), t) + } else { + build_const_struct(ccx, l, vals, None) + } + } + layout::Variants::Tagged { .. } => { let discr = match *kind { mir::AggregateKind::Adt(adt_def, _, _, _) => { adt_def.discriminant_for_variant(ccx.tcx(), variant_index) @@ -1095,114 +1154,103 @@ fn trans_const<'a, 'tcx>( }, _ => 0, }; - assert_eq!(vals.len(), 0); - adt::assert_discr_in_range(min, max, discr); - C_int(Type::from_integer(ccx, d), discr as i64) - } - layout::General { discr: d, ref variants, .. } => { - let variant = &variants[variant_index]; - let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64); - let mut vals_with_discr = vec![lldiscr]; - vals_with_discr.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); - let needed_padding = l.size(ccx).bytes() - variant.stride().bytes(); - if needed_padding > 0 { - contents.push(padding(ccx, needed_padding)); - } - C_struct(ccx, &contents[..], false) - } - layout::UntaggedUnion { ref variants, .. }=> { - assert_eq!(variant_index, 0); - let contents = build_const_union(ccx, variants, vals[0]); - C_struct(ccx, &contents, variants.packed) - } - layout::Univariant { ref variant, .. } => { - assert_eq!(variant_index, 0); - let contents = build_const_struct(ccx, &variant, vals); - C_struct(ccx, &contents[..], variant.packed) - } - layout::Vector { .. } => { - C_vector(vals) - } - layout::RawNullablePointer { nndiscr, .. } => { - if variant_index as u64 == nndiscr { - assert_eq!(vals.len(), 1); - vals[0] + let discr_field = l.field(ccx, 0); + let discr = C_int(discr_field.llvm_type(ccx), discr as i64); + if let layout::Abi::Scalar(_) = l.abi { + Const::new(discr, t) } else { - C_null(type_of::type_of(ccx, t)) + let discr = Const::new(discr, discr_field.ty); + build_const_struct(ccx, l.for_variant(ccx, variant_index), vals, Some(discr)) } } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - if variant_index as u64 == nndiscr { - C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false) + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. + } => { + if variant_index == dataful_variant { + build_const_struct(ccx, l.for_variant(ccx, dataful_variant), vals, None) } else { - // Always use null even if it's not the `discrfield`th - // field; see #8506. 
- C_null(type_of::type_of(ccx, t)) + let niche = l.field(ccx, 0); + let niche_llty = niche.llvm_type(ccx); + let niche_value = ((variant_index - niche_variants.start) as u128) + .wrapping_add(niche_start); + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_value == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. + C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_value) + }; + build_const_struct(ccx, l, &[Const::new(niche_llval, niche.ty)], None) } } - _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l) } } /// Building structs is a little complicated, because we might need to /// insert padding if a field's value is less aligned than its type. /// -/// Continuing the example from `trans_const`, a value of type `(u32, +/// Continuing the example from `trans_const_adt`, a value of type `(u32, /// E)` should have the `E` at offset 8, but if that field's /// initializer is 4-byte aligned then simply translating the tuple as /// a two-element struct will locate it at offset 4, and accesses to it /// will read the wrong memory. fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - st: &layout::Struct, - vals: &[ValueRef]) - -> Vec { - assert_eq!(vals.len(), st.offsets.len()); - - if vals.len() == 0 { - return Vec::new(); + layout: layout::TyLayout<'tcx>, + vals: &[Const<'tcx>], + discr: Option>) + -> Const<'tcx> { + assert_eq!(vals.len(), layout.fields.count()); + + match layout.abi { + layout::Abi::Scalar(_) | + layout::Abi::ScalarPair(..) if discr.is_none() => { + let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| { + (f, layout.fields.offset(i)) + }).filter(|&(f, _)| !ccx.layout_of(f.ty).is_zst()); + match (non_zst_fields.next(), non_zst_fields.next()) { + (Some((x, offset)), None) if offset.bytes() == 0 => { + return Const::new(x.llval, layout.ty); + } + (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => { + return Const::new(C_struct(ccx, &[a.llval, b.llval], false), layout.ty); + } + (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => { + return Const::new(C_struct(ccx, &[b.llval, a.llval], false), layout.ty); + } + _ => {} + } + } + _ => {} } // offset of current value - let mut offset = 0; + let mut offset = Size::from_bytes(0); let mut cfields = Vec::new(); - cfields.reserve(st.offsets.len()*2); + cfields.reserve(discr.is_some() as usize + 1 + layout.fields.count() * 2); - let parts = st.field_index_by_increasing_offset().map(|i| { - (&vals[i], st.offsets[i].bytes()) - }); - for (&val, target_offset) in parts { - if offset < target_offset { - cfields.push(padding(ccx, target_offset - offset)); - offset = target_offset; - } - assert!(!is_undef(val)); - cfields.push(val); - offset += machine::llsize_of_alloc(ccx, val_ty(val)); + if let Some(discr) = discr { + cfields.push(discr.llval); + offset = ccx.size_of(discr.ty); } - if offset < st.stride().bytes() { - cfields.push(padding(ccx, st.stride().bytes() - offset)); + let parts = layout.fields.index_by_increasing_offset().map(|i| { + (vals[i], layout.fields.offset(i)) + }); + for (val, target_offset) in parts { + cfields.push(padding(ccx, target_offset - offset)); + cfields.push(val.llval); + offset = target_offset + ccx.size_of(val.ty); } - cfields -} - -fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, - un: &layout::Union, - field_val: ValueRef) - -> Vec { - let mut cfields = vec![field_val]; - - let offset = machine::llsize_of_alloc(ccx, val_ty(field_val)); - let size = un.stride().bytes(); - if 
offset != size { - cfields.push(padding(ccx, size - offset)); - } + // Pad to the size of the whole type, not e.g. the variant. + cfields.push(padding(ccx, ccx.size_of(layout.ty) - offset)); - cfields + Const::new(C_struct(ccx, &cfields, layout.is_packed()), layout.ty) } -fn padding(ccx: &CrateContext, size: u64) -> ValueRef { - C_undef(Type::array(&Type::i8(ccx), size)) +fn padding(ccx: &CrateContext, size: Size) -> ValueRef { + C_undef(Type::array(&Type::i8(ccx), size.bytes())) } diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index d939acaccd99c4611329d5a63a7d7f1f15f5d6b3..891d52045c217a2ae99a7de9a17ff90d7dc8378e 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -8,18 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::ValueRef; -use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use llvm::{self, ValueRef}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; -use adt; +use base; use builder::Builder; -use common::{self, CrateContext, C_usize}; +use common::{CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big}; use consts; -use machine; -use type_of; +use type_of::LayoutLlvmExt; use type_::Type; use value::Value; use glue; @@ -28,10 +27,11 @@ use std::ops; use super::{MirContext, LocalRef}; +use super::operand::{OperandRef, OperandValue}; #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Alignment { - Packed, + Packed(Align), AbiAligned, } @@ -40,34 +40,36 @@ impl ops::BitOr for Alignment { fn bitor(self, rhs: Self) -> Self { match (self, rhs) { - (Alignment::Packed, _) => Alignment::Packed, - (Alignment::AbiAligned, a) => a, + (Alignment::Packed(a), Alignment::Packed(b)) => { + Alignment::Packed(a.min(b)) + } + (Alignment::Packed(x), _) | (_, Alignment::Packed(x)) => { + Alignment::Packed(x) + } + (Alignment::AbiAligned, Alignment::AbiAligned) => { + Alignment::AbiAligned + } } } } -impl Alignment { - pub fn from_packed(packed: bool) -> Self { - if packed { - Alignment::Packed +impl<'a> From> for Alignment { + fn from(layout: TyLayout) -> Self { + if layout.is_packed() { + Alignment::Packed(layout.align) } else { Alignment::AbiAligned } } +} - pub fn to_align(self) -> Option { +impl Alignment { + pub fn non_abi(self) -> Option { match self { - Alignment::Packed => Some(1), + Alignment::Packed(x) => Some(x), Alignment::AbiAligned => None, } } - - pub fn min_with(self, align: u32) -> Option { - match self { - Alignment::Packed => Some(1), - Alignment::AbiAligned => Some(align), - } - } } #[derive(Copy, Clone, Debug)] @@ -79,41 +81,43 @@ pub struct LvalueRef<'tcx> { pub llextra: ValueRef, /// Monomorphized type of this lvalue, including variant information - pub ty: LvalueTy<'tcx>, + pub layout: TyLayout<'tcx>, /// Whether this lvalue is known to be aligned according to its layout pub alignment: Alignment, } impl<'a, 'tcx> LvalueRef<'tcx> { - pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>, - alignment: Alignment) -> LvalueRef<'tcx> { - LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment } - } - - pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> { - LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment) + pub fn new_sized(llval: ValueRef, + layout: TyLayout<'tcx>, + 
alignment: Alignment) + -> LvalueRef<'tcx> { + LvalueRef { + llval, + llextra: ptr::null_mut(), + layout, + alignment + } } - pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> { - debug!("alloca({:?}: {:?})", name, ty); - let tmp = bcx.alloca( - type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty)); - assert!(!ty.has_param_types()); - Self::new_sized_ty(tmp, ty, Alignment::AbiAligned) + pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str) + -> LvalueRef<'tcx> { + debug!("alloca({:?}: {:?})", name, layout); + let tmp = bcx.alloca(layout.llvm_type(bcx.ccx), name, layout.align); + Self::new_sized(tmp, layout, Alignment::AbiAligned) } pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { - let ty = self.ty.to_ty(ccx.tcx()); - match ty.sty { - ty::TyArray(_, n) => { - common::C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap()) - } - ty::TySlice(_) | ty::TyStr => { - assert!(self.llextra != ptr::null_mut()); + if let layout::FieldPlacement::Array { count, .. } = self.layout.fields { + if self.layout.is_unsized() { + assert!(self.has_extra()); + assert_eq!(count, 0); self.llextra + } else { + C_usize(ccx, count) } - _ => bug!("unexpected type `{}` in LvalueRef::len", ty) + } else { + bug!("unexpected layout `{:#?}` in LvalueRef::len", self.layout) } } @@ -121,53 +125,132 @@ pub fn has_extra(&self) -> bool { !self.llextra.is_null() } - fn struct_field_ptr( - self, - bcx: &Builder<'a, 'tcx>, - st: &layout::Struct, - fields: &Vec>, - ix: usize, - needs_cast: bool - ) -> (ValueRef, Alignment) { - let fty = fields[ix]; - let ccx = bcx.ccx; + pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { + debug!("LvalueRef::load: {:?}", self); - let alignment = self.alignment | Alignment::from_packed(st.packed); + assert!(!self.has_extra()); + + if self.layout.is_zst() { + return OperandRef::new_zst(bcx.ccx, self.layout); + } - let llfields = adt::struct_llfields(ccx, fields, st); - let ptr_val = if needs_cast { - let real_ty = Type::struct_(ccx, &llfields[..], st.packed); - bcx.pointercast(self.llval, real_ty.ptr_to()) + let scalar_load_metadata = |load, scalar: &layout::Scalar| { + let (min, max) = (scalar.valid_range.start, scalar.valid_range.end); + let max_next = max.wrapping_add(1); + let bits = scalar.value.size(bcx.ccx).bits(); + assert!(bits <= 128); + let mask = !0u128 >> (128 - bits); + // For a (max) value of -1, max will be `-1 as usize`, which overflows. + // However, that is fine here (it would still represent the full range), + // i.e., if the range is everything. The lo==hi case would be + // rejected by the LLVM verifier (it would mean either an + // empty set, which is impossible, or the entire range of the + // type, which is pointless). + match scalar.value { + layout::Int(..) if max_next & mask != min & mask => { + // llvm::ConstantRange can deal with ranges that wrap around, + // so an overflow on (max + 1) is fine. 
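+ // For example, `bool` has a valid range of 0..=1, so its loads get `!range [0, 2)` metadata.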
+ bcx.range_metadata(load, min..max_next); + } + layout::Pointer if 0 < min && min < max => { + bcx.nonnull_metadata(load); + } + _ => {} + } + }; + + let val = if self.layout.is_llvm_immediate() { + let mut const_llval = ptr::null_mut(); + unsafe { + let global = llvm::LLVMIsAGlobalVariable(self.llval); + if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + + let llval = if !const_llval.is_null() { + const_llval + } else { + let load = bcx.load(self.llval, self.alignment.non_abi()); + if let layout::Abi::Scalar(ref scalar) = self.layout.abi { + scalar_load_metadata(load, scalar); + } + load + }; + OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { + let load = |i, scalar: &layout::Scalar| { + let mut llptr = bcx.struct_gep(self.llval, i as u64); + // Make sure to always load i1 as i8. + if scalar.is_bool() { + llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx)); + } + let load = bcx.load(llptr, self.alignment.non_abi()); + scalar_load_metadata(load, scalar); + if scalar.is_bool() { + bcx.trunc(load, Type::i1(bcx.ccx)) + } else { + load + } + }; + OperandValue::Pair(load(0, a), load(1, b)) } else { - self.llval + OperandValue::Ref(self.llval, self.alignment) + }; + + OperandRef { val, layout: self.layout } + } + + /// Access a field, at a point when the value's case is known. + pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> { + let ccx = bcx.ccx; + let field = self.layout.field(ccx, ix); + let offset = self.layout.fields.offset(ix); + let alignment = self.alignment | Alignment::from(self.layout); + + let simple = || { + // Unions and newtypes only use an offset of 0. + let llval = if offset.bytes() == 0 { + self.llval + } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { + // Offsets have to match either first or second field. + assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx))); + bcx.struct_gep(self.llval, 1) + } else { + bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + }; + LvalueRef { + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()), + llextra: if ccx.shared().type_has_metadata(field.ty) { + self.llextra + } else { + ptr::null_mut() + }, + layout: field, + alignment, + } }; // Simple case - we can just GEP the field - // * First field - Always aligned properly // * Packed struct - There is no alignment padding // * Field is sized - pointer is properly aligned already - if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed || - bcx.ccx.shared().type_is_sized(fty) - { - return (bcx.struct_gep( - ptr_val, adt::struct_llfields_index(st, ix)), alignment); + if self.layout.is_packed() || !field.is_unsized() { + return simple(); } // If the type of the last field is [T], str or a foreign type, then we don't need to do // any adjusments - match fty.sty { - ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => { - return (bcx.struct_gep( - ptr_val, adt::struct_llfields_index(st, ix)), alignment); - } + match field.ty.sty { + ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(), _ => () } // There's no metadata available, log the case and just do the GEP. 
if !self.has_extra() { debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment", - ix, Value(ptr_val)); - return (bcx.struct_gep(ptr_val, adt::struct_llfields_index(st, ix)), alignment); + ix, Value(self.llval)); + return simple(); } // We need to get the pointer manually now. @@ -187,12 +270,10 @@ fn struct_field_ptr( let meta = self.llextra; - - let offset = st.offsets[ix].bytes(); - let unaligned_offset = C_usize(bcx.ccx, offset); + let unaligned_offset = C_usize(ccx, offset.bytes()); // Get the alignment of the field - let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta); + let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta); // Bump the unaligned offset up to the appropriate alignment using the // following expression: @@ -200,89 +281,166 @@ fn struct_field_ptr( // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bcx.sub(align, C_usize(bcx.ccx, 1)); + let align_sub_1 = bcx.sub(align, C_usize(ccx, 1u64)); let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1), bcx.neg(align)); debug!("struct_field_ptr: DST field offset: {:?}", Value(offset)); // Cast and adjust pointer - let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx)); + let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx)); let byte_ptr = bcx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty); + let ll_fty = field.llvm_type(ccx); debug!("struct_field_ptr: Field type is {:?}", ll_fty); - (bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment) + + LvalueRef { + llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()), + llextra: self.llextra, + layout: field, + alignment, + } } - /// Access a field, at a point when the value's case is known. - pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) { - let discr = match self.ty { - LvalueTy::Ty { .. } => 0, - LvalueTy::Downcast { variant_index, .. } => variant_index, - }; - let t = self.ty.to_ty(bcx.tcx()); - let l = bcx.ccx.layout_of(t); - // Note: if this ever needs to generate conditionals (e.g., if we - // decide to do some kind of cdr-coding-like non-unique repr - // someday), it will need to return a possibly-new bcx as well. - match *l { - layout::Univariant { ref variant, .. } => { - assert_eq!(discr, 0); - self.struct_field_ptr(bcx, &variant, - &adt::compute_fields(bcx.ccx, t, 0, false), ix, false) + /// Obtain the actual discriminant of a value. + pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { + let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx); + match self.layout.variants { + layout::Variants::Single { index } => { + return C_uint(cast_to, index as u64); } - layout::Vector { count, .. } => { - assert_eq!(discr, 0); - assert!((ix as u64) < count); - (bcx.struct_gep(self.llval, ix), self.alignment) - } - layout::General { discr: d, ref variants, .. } => { - let mut fields = adt::compute_fields(bcx.ccx, t, discr, false); - fields.insert(0, d.to_ty(&bcx.tcx(), false)); - self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true) + layout::Variants::Tagged { .. } | + layout::Variants::NicheFilling { .. } => {}, + } + + let discr = self.project_field(bcx, 0); + let lldiscr = discr.load(bcx).immediate(); + match self.layout.variants { + layout::Variants::Single { .. } => bug!(), + layout::Variants::Tagged { ref discr, .. 
} => { + let signed = match discr.value { + layout::Int(_, signed) => signed, + _ => false + }; + bcx.intcast(lldiscr, cast_to, signed) } - layout::UntaggedUnion { ref variants } => { - let fields = adt::compute_fields(bcx.ccx, t, 0, false); - let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]); - (bcx.pointercast(self.llval, ty.ptr_to()), - self.alignment | Alignment::from_packed(variants.packed)) + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. + } => { + let niche_llty = discr.layout.immediate_llvm_type(bcx.ccx); + if niche_variants.start == niche_variants.end { + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_start == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. + C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_start) + }; + bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval), + C_uint(cast_to, niche_variants.start as u64), + C_uint(cast_to, dataful_variant as u64)) + } else { + // Rebase from niche values to discriminant values. + let delta = niche_start.wrapping_sub(niche_variants.start as u128); + let lldiscr = bcx.sub(lldiscr, C_uint_big(niche_llty, delta)); + let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64); + bcx.select(bcx.icmp(llvm::IntULE, lldiscr, lldiscr_max), + bcx.intcast(lldiscr, cast_to, false), + C_uint(cast_to, dataful_variant as u64)) + } } - layout::RawNullablePointer { nndiscr, .. } | - layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => { - let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false); - // The unit-like case might have a nonzero number of unit-like fields. - // (e.d., Result of Either with (), as one side.) - let ty = type_of::type_of(bcx.ccx, nullfields[ix]); - assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0); - (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed) + } + } + + /// Set the discriminant for a new value of the given case of the given + /// representation. + pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) { + match self.layout.variants { + layout::Variants::Single { index } => { + if index != variant_index { + // If the layout of an enum is `Single`, all + // other variants are necessarily uninhabited. + assert_eq!(self.layout.for_variant(bcx.ccx, variant_index).abi, + layout::Abi::Uninhabited); + } } - layout::RawNullablePointer { nndiscr, .. } => { - let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0]; - assert_eq!(ix, 0); - assert_eq!(discr as u64, nndiscr); - let ty = type_of::type_of(bcx.ccx, nnty); - (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment) + layout::Variants::Tagged { .. } => { + let ptr = self.project_field(bcx, 0); + let to = self.layout.ty.ty_adt_def().unwrap() + .discriminant_for_variant(bcx.tcx(), variant_index) + .to_u128_unchecked() as u64; + bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), + ptr.llval, ptr.alignment.non_abi()); } - layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { - assert_eq!(discr as u64, nndiscr); - self.struct_field_ptr(bcx, &nonnull, - &adt::compute_fields(bcx.ccx, t, discr, false), ix, false) + layout::Variants::NicheFilling { + dataful_variant, + ref niche_variants, + niche_start, + .. 
+ } => { + if variant_index != dataful_variant { + if bcx.sess().target.target.arch == "arm" || + bcx.sess().target.target.arch == "aarch64" { + // Issue #34427: As workaround for LLVM bug on ARM, + // use memset of 0 before assigning niche value. + let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to()); + let fill_byte = C_u8(bcx.ccx, 0); + let (size, align) = self.layout.size_and_align(); + let size = C_usize(bcx.ccx, size.bytes()); + let align = C_u32(bcx.ccx, align.abi() as u32); + base::call_memset(bcx, llptr, fill_byte, size, align, false); + } + + let niche = self.project_field(bcx, 0); + let niche_llty = niche.layout.immediate_llvm_type(bcx.ccx); + let niche_value = ((variant_index - niche_variants.start) as u128) + .wrapping_add(niche_start); + // FIXME(eddyb) Check the actual primitive type here. + let niche_llval = if niche_value == 0 { + // HACK(eddyb) Using `C_null` as it works on all types. + C_null(niche_llty) + } else { + C_uint_big(niche_llty, niche_value) + }; + OperandValue::Immediate(niche_llval).store(bcx, niche); + } } - _ => bug!("element access in type without elements: {} represented as {:#?}", t, l) } } - pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { - if let ty::TySlice(_) = self.ty.to_ty(bcx.tcx()).sty { - // Slices already point to the array element type. - bcx.inbounds_gep(self.llval, &[llindex]) - } else { - let zero = common::C_usize(bcx.ccx, 0); - bcx.inbounds_gep(self.llval, &[zero, llindex]) + pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) + -> LvalueRef<'tcx> { + LvalueRef { + llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.ccx, 0), llindex]), + llextra: ptr::null_mut(), + layout: self.layout.field(bcx.ccx, 0), + alignment: self.alignment } } + + pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) + -> LvalueRef<'tcx> { + let mut downcast = *self; + downcast.layout = self.layout.for_variant(bcx.ccx, variant_index); + + // Cast to the appropriate variant struct type. + let variant_ty = downcast.layout.llvm_type(bcx.ccx); + downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); + + downcast + } + + pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) { + bcx.lifetime_start(self.llval, self.layout.size); + } + + pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) { + bcx.lifetime_end(self.llval, self.layout.size); + } } impl<'a, 'tcx> MirContext<'a, 'tcx> { @@ -310,7 +468,7 @@ pub fn trans_lvalue(&mut self, mir::Lvalue::Local(_) => bug!(), // handled above mir::Lvalue::Static(box mir::Static { def_id, ty }) => { LvalueRef::new_sized(consts::get_static(ccx, def_id), - LvalueTy::from_ty(self.monomorphize(&ty)), + ccx.layout_of(self.monomorphize(&ty)), Alignment::AbiAligned) }, mir::Lvalue::Projection(box mir::Projection { @@ -318,37 +476,27 @@ pub fn trans_lvalue(&mut self, elem: mir::ProjectionElem::Deref }) => { // Load the pointer from its location. 
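// Standalone sketch, not rustc or patch code: the niche-filling arithmetic used by
// trans_get_discr and trans_set_discr above. Variant indices in `niche_variants`
// are stored as otherwise-invalid values of one field, starting at `niche_start`;
// the u128 parameters and example values here are hypothetical.
fn encode_niche(variant_index: u128, niche_variants_start: u128, niche_start: u128) -> u128 {
    // trans_set_discr: (variant_index - niche_variants.start) + niche_start
    (variant_index - niche_variants_start).wrapping_add(niche_start)
}
fn decode_niche(niche_value: u128, niche_variants_start: u128, niche_start: u128) -> u128 {
    // trans_get_discr: rebase by delta = niche_start - niche_variants.start
    let delta = niche_start.wrapping_sub(niche_variants_start);
    niche_value.wrapping_sub(delta)
}
fn main() {
    // Round trip for a hypothetical layout where variant 1 is stored as niche value 2.
    assert_eq!(encode_niche(1, 1, 2), 2);
    assert_eq!(decode_niche(2, 1, 2), 1);
}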
- self.trans_consume(bcx, base).deref() + self.trans_consume(bcx, base).deref(bcx.ccx) } mir::Lvalue::Projection(ref projection) => { let tr_base = self.trans_lvalue(bcx, &projection.base); - let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); - let projected_ty = self.monomorphize(&projected_ty); - let align = tr_base.alignment; - let ((llprojected, align), llextra) = match projection.elem { + match projection.elem { mir::ProjectionElem::Deref => bug!(), mir::ProjectionElem::Field(ref field, _) => { - let has_metadata = self.ccx.shared() - .type_has_metadata(projected_ty.to_ty(tcx)); - let llextra = if !has_metadata { - ptr::null_mut() - } else { - tr_base.llextra - }; - (tr_base.trans_field_ptr(bcx, field.index()), llextra) + tr_base.project_field(bcx, field.index()) } mir::ProjectionElem::Index(index) => { let index = &mir::Operand::Consume(mir::Lvalue::Local(index)); let index = self.trans_operand(bcx, index); - let llindex = self.prepare_index(bcx, index.immediate()); - ((tr_base.project_index(bcx, llindex), align), ptr::null_mut()) + let llindex = index.immediate(); + tr_base.project_index(bcx, llindex) } mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { let lloffset = C_usize(bcx.ccx, offset as u64); - ((tr_base.project_index(bcx, lloffset), align), ptr::null_mut()) + tr_base.project_index(bcx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, @@ -356,39 +504,31 @@ pub fn trans_lvalue(&mut self, let lloffset = C_usize(bcx.ccx, offset as u64); let lllen = tr_base.len(bcx.ccx); let llindex = bcx.sub(lllen, lloffset); - ((tr_base.project_index(bcx, llindex), align), ptr::null_mut()) + tr_base.project_index(bcx, llindex) } mir::ProjectionElem::Subslice { from, to } => { - let llbase = tr_base.project_index(bcx, C_usize(bcx.ccx, from as u64)); - - let base_ty = tr_base.ty.to_ty(bcx.tcx()); - match base_ty.sty { - ty::TyArray(..) => { - // must cast the lvalue pointer type to the new - // array type (*[%_; new_len]). - let base_ty = self.monomorphized_lvalue_ty(lvalue); - let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to(); - let llbase = bcx.pointercast(llbase, llbasety); - ((llbase, align), ptr::null_mut()) - } - ty::TySlice(..) => { - assert!(tr_base.llextra != ptr::null_mut()); - let lllen = bcx.sub(tr_base.llextra, - C_usize(bcx.ccx, (from as u64)+(to as u64))); - ((llbase, align), lllen) - } - _ => bug!("unexpected type {:?} in Subslice", base_ty) + let mut subslice = tr_base.project_index(bcx, + C_usize(bcx.ccx, from as u64)); + let projected_ty = LvalueTy::Ty { ty: tr_base.layout.ty } + .projection_ty(tcx, &projection.elem).to_ty(bcx.tcx()); + subslice.layout = bcx.ccx.layout_of(self.monomorphize(&projected_ty)); + + if subslice.layout.is_unsized() { + assert!(tr_base.has_extra()); + subslice.llextra = bcx.sub(tr_base.llextra, + C_usize(bcx.ccx, (from as u64) + (to as u64))); } + + // Cast the lvalue pointer type to the new + // array or slice type (*[%_; new_len]). + subslice.llval = bcx.pointercast(subslice.llval, + subslice.layout.llvm_type(bcx.ccx).ptr_to()); + + subslice } - mir::ProjectionElem::Downcast(..) => { - ((tr_base.llval, align), tr_base.llextra) + mir::ProjectionElem::Downcast(_, v) => { + tr_base.project_downcast(bcx, v) } - }; - LvalueRef { - llval: llprojected, - llextra, - ty: projected_ty, - alignment: align, } } }; @@ -396,22 +536,6 @@ pub fn trans_lvalue(&mut self, result } - /// Adjust the bitwidth of an index since LLVM is less forgiving - /// than we are. 
- /// - /// nmatsakis: is this still necessary? Not sure. - fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef { - let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex)); - let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.isize_ty()); - if index_size < int_size { - bcx.zext(llindex, bcx.ccx.isize_ty()) - } else if index_size > int_size { - bcx.trunc(llindex, bcx.ccx.isize_ty()) - } else { - llindex - } - } - pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> { let tcx = self.ccx.tcx(); let lvalue_ty = lvalue.ty(self.mir, tcx); diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 59da80035fd36eb6eb13bdd63ba33b6be9dce030..7f3a430c418e9451df4cc682a1722e19a090efe1 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -11,20 +11,18 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; -use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::{self, TypeFoldable}; +use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc::mir::{self, Mir}; -use rustc::mir::tcx::LvalueTy; use rustc::ty::subst::Substs; use rustc::infer::TransNormalize; use rustc::session::config::FullDebugInfo; use base; use builder::Builder; -use common::{self, CrateContext, Funclet}; +use common::{CrateContext, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; -use abi::{ArgAttribute, FnType}; -use type_of; +use abi::{ArgAttribute, FnType, PassMode}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -61,7 +59,7 @@ pub struct MirContext<'a, 'tcx:'a> { /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - llpersonalityslot: Option, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` blocks: IndexVec, @@ -86,7 +84,7 @@ pub struct MirContext<'a, 'tcx:'a> { /// directly using an `OperandRef`, which makes for tighter LLVM /// IR. The conditions for using an `OperandRef` are as follows: /// - /// - the type of the local must be judged "immediate" by `type_is_immediate` + /// - the type of the local must be judged "immediate" by `is_llvm_immediate` /// - the operand must never be referenced indirectly /// - we should not take its address using the `&` operator /// - nor should it appear in an lvalue path like `tmp.a` @@ -177,14 +175,13 @@ enum LocalRef<'tcx> { Operand(Option>), } -impl<'tcx> LocalRef<'tcx> { - fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> LocalRef<'tcx> { - if common::type_is_zero_size(ccx, ty) { +impl<'a, 'tcx> LocalRef<'tcx> { + fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> { + if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. 
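// Standalone sketch, not rustc or patch code: why a zero-sized local can live as an
// operand with no alloca, as in new_operand above. A ZST has no bytes to initialize,
// load, or store, so any placeholder value is as good as memory. `Marker` is a
// made-up type for illustration.
#[derive(Clone, Copy)]
struct Marker; // zero-sized type
fn main() {
    assert_eq!(std::mem::size_of::<Marker>(), 0);
    let m = Marker;  // no stack slot or store is actually required for this
    let _copy = m;
}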
- LocalRef::Operand(Some(OperandRef::new_zst(ccx, ty))) + LocalRef::Operand(Some(OperandRef::new_zst(ccx, layout))) } else { LocalRef::Operand(None) } @@ -232,7 +229,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( llfn, fn_ty, ccx, - llpersonalityslot: None, + personality_slot: None, blocks: block_bcxs, unreachable_block: None, cleanup_kinds, @@ -255,7 +252,8 @@ pub fn trans_mir<'a, 'tcx: 'a>( let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let ty = mircx.monomorphize(&decl.ty); + let layout = bcx.ccx.layout_of(mircx.monomorphize(&decl.ty)); + assert!(!layout.ty.has_erasable_regions()); if let Some(name) = decl.name { // User variable @@ -264,15 +262,14 @@ pub fn trans_mir<'a, 'tcx: 'a>( if !lvalue_locals.contains(local.index()) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bcx.ccx, ty); + return LocalRef::new_operand(bcx.ccx, layout); } debug!("alloc: {:?} ({}) -> lvalue", local, name); - assert!(!ty.has_erasable_regions()); - let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str()); + let lvalue = LvalueRef::alloca(&bcx, layout, &name.as_str()); if dbg { let (scope, span) = mircx.debug_loc(decl.source_info); - declare_local(&bcx, &mircx.debug_context, name, ty, scope, + declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope, VariableAccess::DirectVariable { alloca: lvalue.llval }, VariableKind::LocalVariable, span); } @@ -282,18 +279,18 @@ pub fn trans_mir<'a, 'tcx: 'a>( if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return pointer) -> lvalue", local); let llretptr = llvm::get_param(llfn, 0); - LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty), + LocalRef::Lvalue(LvalueRef::new_sized(llretptr, + layout, Alignment::AbiAligned)) } else if lvalue_locals.contains(local.index()) { debug!("alloc: {:?} -> lvalue", local); - assert!(!ty.has_erasable_regions()); - LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local))) + LocalRef::Lvalue(LvalueRef::alloca(&bcx, layout, &format!("{:?}", local))) } else { // If this is an immediate local, we do not create an // alloca in advance. Instead we wait until we see the // definition and update the operand there. debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bcx.ccx, ty) + LocalRef::new_operand(bcx.ccx, layout) } } }; @@ -384,7 +381,6 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, mir.args_iter().enumerate().map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; - let arg_ty = mircx.monomorphize(&arg_decl.ty); let name = if let Some(name) = arg_decl.name { name.as_str().to_string() @@ -398,26 +394,17 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // to reconstruct it into a tuple local variable, from multiple // individual LLVM function arguments. + let arg_ty = mircx.monomorphize(&arg_decl.ty); let tupled_arg_tys = match arg_ty.sty { ty::TyTuple(ref tys, _) => tys, _ => bug!("spread argument isn't a tuple?!") }; - let lvalue = LvalueRef::alloca(bcx, arg_ty, &name); - for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() { - let (dst, _) = lvalue.trans_field_ptr(bcx, i); + let lvalue = LvalueRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name); + for i in 0..tupled_arg_tys.len() { let arg = &mircx.fn_ty.args[idx]; idx += 1; - if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) { - // We pass fat pointers as two words, but inside the tuple - // they are the two sub-fields of a single aggregate field. 
- let meta = &mircx.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst)); - meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst)); - } else { - arg.store_fn_arg(bcx, &mut llarg_idx, dst); - } + arg.store_fn_arg(bcx, &mut llarg_idx, lvalue.project_field(bcx, i)); } // Now that we have one alloca that contains the aggregate value, @@ -442,82 +429,56 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let arg = &mircx.fn_ty.args[idx]; idx += 1; - let llval = if arg.is_indirect() { - // Don't copy an indirect argument to an alloca, the caller - // already put it in a temporary alloca and gave it up - // FIXME: lifetimes - if arg.pad.is_some() { - llarg_idx += 1; - } - let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); - bcx.set_value_name(llarg, &name); + if arg.pad.is_some() { llarg_idx += 1; - llarg - } else if !lvalue_locals.contains(local.index()) && - arg.cast.is_none() && arg_scope.is_none() { - if arg.is_ignore() { - return LocalRef::new_operand(bcx.ccx, arg_ty); - } + } + if arg_scope.is_none() && !lvalue_locals.contains(local.index()) { // We don't have to cast or keep the argument in the alloca. // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead // of putting everything in allocas just so we can use llvm.dbg.declare. - if arg.pad.is_some() { - llarg_idx += 1; + let local = |op| LocalRef::Operand(Some(op)); + match arg.mode { + PassMode::Ignore => { + return local(OperandRef::new_zst(bcx.ccx, arg.layout)); + } + PassMode::Direct(_) => { + let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(llarg, &name); + llarg_idx += 1; + return local( + OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout)); + } + PassMode::Pair(..) => { + let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(a, &(name.clone() + ".0")); + llarg_idx += 1; + + let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(b, &(name + ".1")); + llarg_idx += 1; + + return local(OperandRef { + val: OperandValue::Pair(a, b), + layout: arg.layout + }); + } + _ => {} } + } + + let lvalue = if arg.is_indirect() { + // Don't copy an indirect argument to an alloca, the caller + // already put it in a temporary alloca and gave it up. + // FIXME: lifetimes let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(llarg, &name); llarg_idx += 1; - let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { - let meta = &mircx.fn_ty.args[idx]; - idx += 1; - assert_eq!((meta.cast, meta.pad), (None, None)); - let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); - llarg_idx += 1; - - // FIXME(eddyb) As we can't perfectly represent the data and/or - // vtable pointer in a fat pointers in Rust's typesystem, and - // because we split fat pointers into two ArgType's, they're - // not the right type so we have to cast them for now. 
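// Standalone sketch, not rustc or patch code: the PassMode::Pair case above corresponds
// to fat pointers (and other scalar pairs) being two machine words, so they arrive as
// two separate LLVM parameters (named "arg.0" / "arg.1" above) rather than one aggregate.
fn fat_pointer_parts(s: &[u8]) -> (*const u8, usize) {
    // data pointer + length metadata: the two halves of the pair
    (s.as_ptr(), s.len())
}
fn main() {
    let bytes = [1u8, 2, 3];
    let (_data, len) = fat_pointer_parts(&bytes);
    assert_eq!(len, 3);
}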
- let pointee = match arg_ty.sty { - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty, - ty::TyAdt(def, _) if def.is_box() => arg_ty.boxed_ty(), - _ => bug!() - }; - let data_llty = type_of::in_memory_type_of(bcx.ccx, pointee); - let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee); - - let llarg = bcx.pointercast(llarg, data_llty.ptr_to()); - bcx.set_value_name(llarg, &(name.clone() + ".ptr")); - let llmeta = bcx.pointercast(llmeta, meta_llty); - bcx.set_value_name(llmeta, &(name + ".meta")); - - OperandValue::Pair(llarg, llmeta) - } else { - bcx.set_value_name(llarg, &name); - OperandValue::Immediate(llarg) - }; - let operand = OperandRef { - val, - ty: arg_ty - }; - return LocalRef::Operand(Some(operand.unpack_if_pair(bcx))); + LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned) } else { - let lltemp = LvalueRef::alloca(bcx, arg_ty, &name); - if common::type_is_fat_ptr(bcx.ccx, arg_ty) { - // we pass fat pointers as two words, but we want to - // represent them internally as a pointer to two words, - // so make an alloca to store them in. - let meta = &mircx.fn_ty.args[idx]; - idx += 1; - arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp.llval)); - meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp.llval)); - } else { - // otherwise, arg is passed by value, so make a - // temporary and store it there - arg.store_fn_arg(bcx, &mut llarg_idx, lltemp.llval); - } - lltemp.llval + let tmp = LvalueRef::alloca(bcx, arg.layout, &name); + arg.store_fn_arg(bcx, &mut llarg_idx, tmp); + tmp }; arg_scope.map(|scope| { // Is this a regular argument? @@ -525,21 +486,24 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // The Rust ABI passes indirect variables using a pointer and a manual copy, so we // need to insert a deref here, but the C ABI uses a pointer and a copy using the // byval attribute, for which LLVM does the deref itself, so we must not add it. - let variable_access = if arg.is_indirect() && - !arg.attrs.contains(ArgAttribute::ByVal) { - VariableAccess::IndirectVariable { - alloca: llval, - address_operations: &deref_op, - } - } else { - VariableAccess::DirectVariable { alloca: llval } + let mut variable_access = VariableAccess::DirectVariable { + alloca: lvalue.llval }; + if let PassMode::Indirect(ref attrs) = arg.mode { + if !attrs.contains(ArgAttribute::ByVal) { + variable_access = VariableAccess::IndirectVariable { + alloca: lvalue.llval, + address_operations: &deref_op, + }; + } + } + declare_local( bcx, &mircx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), - arg_ty, + arg.layout.ty, scope, variable_access, VariableKind::ArgumentVariable(arg_index + 1), @@ -549,15 +513,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } // Or is it the closure environment? 
- let (closure_ty, env_ref) = match arg_ty.sty { - ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (mt.ty, true), - _ => (arg_ty, false) + let (closure_layout, env_ref) = match arg.layout.ty.sty { + ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bcx.ccx.layout_of(mt.ty), true), + _ => (arg.layout, false) }; - let upvar_tys = match closure_ty.sty { + let upvar_tys = match closure_layout.ty.sty { ty::TyClosure(def_id, substs) | ty::TyGenerator(def_id, substs, _) => substs.upvar_tys(def_id, tcx), - _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_ty) + _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty) }; // Store the pointer to closure data in an alloca for debuginfo @@ -568,21 +532,17 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. let env_ptr = if !env_ref { - let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr", None); - bcx.store(llval, alloc, None); - alloc + let alloc = LvalueRef::alloca(bcx, + bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), + "__debuginfo_env_ptr"); + bcx.store(lvalue.llval, alloc.llval, None); + alloc.llval } else { - llval - }; - - let layout = bcx.ccx.layout_of(closure_ty); - let offsets = match *layout { - layout::Univariant { ref variant, .. } => &variant.offsets[..], - _ => bug!("Closures are only supposed to be Univariant") + lvalue.llval }; for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { - let byte_offset_of_var_in_env = offsets[i].bytes(); + let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); let ops = unsafe { [llvm::LLVMRustDIBuilderCreateOpDeref(), @@ -620,8 +580,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ); } }); - LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty), - Alignment::AbiAligned)) + LocalRef::Lvalue(lvalue) }).collect() } @@ -629,6 +588,6 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, mod block; mod constant; pub mod lvalue; -mod operand; +pub mod operand; mod rvalue; mod statement; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 9ce1749190ba17710228d41162c423ce42be2ba8..8c43bded1bf217415ab4ca75e25eb5a6623e0e7e 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -9,18 +9,16 @@ // except according to those terms. use llvm::ValueRef; -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty; +use rustc::ty::layout::{self, LayoutOf, TyLayout}; use rustc::mir; -use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; -use adt; use base; -use common::{self, CrateContext, C_null}; +use common::{self, CrateContext, C_undef, C_usize}; use builder::Builder; use value::Value; -use type_of; +use type_of::LayoutLlvmExt; use type_::Type; use std::fmt; @@ -43,63 +41,52 @@ pub enum OperandValue { Pair(ValueRef, ValueRef) } +impl fmt::Debug for OperandValue { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + OperandValue::Ref(r, align) => { + write!(f, "Ref({:?}, {:?})", Value(r), align) + } + OperandValue::Immediate(i) => { + write!(f, "Immediate({:?})", Value(i)) + } + OperandValue::Pair(a, b) => { + write!(f, "Pair({:?}, {:?})", Value(a), Value(b)) + } + } + } +} + /// An `OperandRef` is an "SSA" reference to a Rust value, along with /// its type. 
/// /// NOTE: unless you know a value's type exactly, you should not /// generate LLVM opcodes acting on it and instead act via methods, -/// to avoid nasty edge cases. In particular, using `Builder.store` -/// directly is sure to cause problems -- use `MirContext.store_operand` +/// to avoid nasty edge cases. In particular, using `Builder::store` +/// directly is sure to cause problems -- use `OperandRef::store` /// instead. #[derive(Copy, Clone)] pub struct OperandRef<'tcx> { // The value. pub val: OperandValue, - // The type of value being returned. - pub ty: Ty<'tcx> + // The layout of value, based on its Rust type. + pub layout: TyLayout<'tcx>, } impl<'tcx> fmt::Debug for OperandRef<'tcx> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.val { - OperandValue::Ref(r, align) => { - write!(f, "OperandRef(Ref({:?}, {:?}) @ {:?})", - Value(r), align, self.ty) - } - OperandValue::Immediate(i) => { - write!(f, "OperandRef(Immediate({:?}) @ {:?})", - Value(i), self.ty) - } - OperandValue::Pair(a, b) => { - write!(f, "OperandRef(Pair({:?}, {:?}) @ {:?})", - Value(a), Value(b), self.ty) - } - } + write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } impl<'a, 'tcx> OperandRef<'tcx> { pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> OperandRef<'tcx> { - assert!(common::type_is_zero_size(ccx, ty)); - let llty = type_of::type_of(ccx, ty); - let val = if common::type_is_imm_pair(ccx, ty) { - let layout = ccx.layout_of(ty); - let (ix0, ix1) = if let Layout::Univariant { ref variant, .. } = *layout { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1)) - } else { - (0, 1) - }; - let fields = llty.field_types(); - OperandValue::Pair(C_null(fields[ix0]), C_null(fields[ix1])) - } else { - OperandValue::Immediate(C_null(llty)) - }; + layout: TyLayout<'tcx>) -> OperandRef<'tcx> { + assert!(layout.is_zst()); OperandRef { - val, - ty, + val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(ccx))), + layout } } @@ -112,8 +99,8 @@ pub fn immediate(self) -> ValueRef { } } - pub fn deref(self) -> LvalueRef<'tcx> { - let projected_ty = self.ty.builtin_deref(true, ty::NoPreference) + pub fn deref(self, ccx: &CrateContext<'a, 'tcx>) -> LvalueRef<'tcx> { + let projected_ty = self.layout.ty.builtin_deref(true, ty::NoPreference) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()), @@ -123,126 +110,150 @@ pub fn deref(self) -> LvalueRef<'tcx> { LvalueRef { llval: llptr, llextra, - ty: LvalueTy::from_ty(projected_ty), + layout: ccx.layout_of(projected_ty), alignment: Alignment::AbiAligned, } } - /// If this operand is a Pair, we return an - /// Immediate aggregate with the two values. - pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { + /// If this operand is a `Pair`, we return an aggregate with the two values. + /// For other cases, see `immediate`. + pub fn immediate_or_packed_pair(self, bcx: &Builder<'a, 'tcx>) -> ValueRef { if let OperandValue::Pair(a, b) = self.val { + let llty = self.layout.llvm_type(bcx.ccx); + debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", + self, llty); // Reconstruct the immediate aggregate. - let llty = type_of::type_of(bcx.ccx, self.ty); - let mut llpair = common::C_undef(llty); - let elems = [a, b]; - for i in 0..2 { - let mut elem = elems[i]; - // Extend boolean i1's to i8. 
- if common::val_ty(elem) == Type::i1(bcx.ccx) { - elem = bcx.zext(elem, Type::i8(bcx.ccx)); - } - let layout = bcx.ccx.layout_of(self.ty); - let i = if let Layout::Univariant { ref variant, .. } = *layout { - adt::struct_llfields_index(variant, i) - } else { - i - }; - llpair = bcx.insert_value(llpair, elem, i); - } - self.val = OperandValue::Immediate(llpair); + let mut llpair = C_undef(llty); + llpair = bcx.insert_value(llpair, a, 0); + llpair = bcx.insert_value(llpair, b, 1); + llpair + } else { + self.immediate() } - self } - /// If this operand is a pair in an Immediate, - /// we return a Pair with the two halves. - pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { - if let OperandValue::Immediate(llval) = self.val { + /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. + pub fn from_immediate_or_packed_pair(bcx: &Builder<'a, 'tcx>, + llval: ValueRef, + layout: TyLayout<'tcx>) + -> OperandRef<'tcx> { + let val = if layout.is_llvm_scalar_pair() { + debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", + llval, layout); + // Deconstruct the immediate aggregate. - if common::type_is_imm_pair(bcx.ccx, self.ty) { - debug!("Operand::unpack_if_pair: unpacking {:?}", self); + OperandValue::Pair(bcx.extract_value(llval, 0), + bcx.extract_value(llval, 1)) + } else { + OperandValue::Immediate(llval) + }; + OperandRef { val, layout } + } - let layout = bcx.ccx.layout_of(self.ty); - let (ix0, ix1) = if let Layout::Univariant { ref variant, .. } = *layout { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1)) - } else { - (0, 1) + pub fn extract_field(&self, bcx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> { + let field = self.layout.field(bcx.ccx, i); + let offset = self.layout.fields.offset(i); + + let mut val = match (self.val, &self.layout.abi) { + // If we're uninhabited, or the field is ZST, it has no data. + _ if self.layout.abi == layout::Abi::Uninhabited || field.is_zst() => { + return OperandRef { + val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bcx.ccx))), + layout: field }; + } - let mut a = bcx.extract_value(llval, ix0); - let mut b = bcx.extract_value(llval, ix1); + // Newtype of a scalar or scalar pair. + (OperandValue::Immediate(_), _) | + (OperandValue::Pair(..), _) if field.size == self.layout.size => { + assert_eq!(offset.bytes(), 0); + self.val + } - let pair_fields = common::type_pair_fields(bcx.ccx, self.ty); - if let Some([a_ty, b_ty]) = pair_fields { - if a_ty.is_bool() { - a = bcx.trunc(a, Type::i1(bcx.ccx)); - } - if b_ty.is_bool() { - b = bcx.trunc(b, Type::i1(bcx.ccx)); - } + // Extract a scalar component from a pair. + (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { + if offset.bytes() == 0 { + assert_eq!(field.size, a.value.size(bcx.ccx)); + OperandValue::Immediate(a_llval) + } else { + assert_eq!(offset, a.value.size(bcx.ccx) + .abi_align(b.value.align(bcx.ccx))); + assert_eq!(field.size, b.value.size(bcx.ccx)); + OperandValue::Immediate(b_llval) } + } + + // `#[repr(simd)]` types are also immediate. + (OperandValue::Immediate(llval), &layout::Abi::Vector) => { + OperandValue::Immediate( + bcx.extract_element(llval, C_usize(bcx.ccx, i as u64))) + } + + _ => bug!("OperandRef::extract_field({:?}): not applicable", self) + }; - self.val = OperandValue::Pair(a, b); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
+ match val { + OperandValue::Immediate(ref mut llval) => { + *llval = bcx.bitcast(*llval, field.immediate_llvm_type(bcx.ccx)); + } + OperandValue::Pair(ref mut a, ref mut b) => { + *a = bcx.bitcast(*a, field.scalar_pair_element_llvm_type(bcx.ccx, 0)); + *b = bcx.bitcast(*b, field.scalar_pair_element_llvm_type(bcx.ccx, 1)); } + OperandValue::Ref(..) => bug!() + } + + OperandRef { + val, + layout: field } - self } } -impl<'a, 'tcx> MirContext<'a, 'tcx> { - pub fn trans_load(&mut self, - bcx: &Builder<'a, 'tcx>, - llval: ValueRef, - align: Alignment, - ty: Ty<'tcx>) - -> OperandRef<'tcx> - { - debug!("trans_load: {:?} @ {:?}", Value(llval), ty); - - let val = if common::type_is_fat_ptr(bcx.ccx, ty) { - let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty); - OperandValue::Pair(lldata, llextra) - } else if common::type_is_imm_pair(bcx.ccx, ty) { - let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(ty) { - Layout::Univariant { ref variant, .. } => { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1), - Alignment::from_packed(variant.packed) | align) - }, - _ => (0, 1, align) - }; - let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap(); - let a_ptr = bcx.struct_gep(llval, ix0); - let b_ptr = bcx.struct_gep(llval, ix1); - - OperandValue::Pair( - base::load_ty(bcx, a_ptr, f_align, a_ty), - base::load_ty(bcx, b_ptr, f_align, b_ty) - ) - } else if common::type_is_immediate(bcx.ccx, ty) { - OperandValue::Immediate(base::load_ty(bcx, llval, align, ty)) - } else { - OperandValue::Ref(llval, align) - }; - - OperandRef { val: val, ty: ty } +impl<'a, 'tcx> OperandValue { + pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: LvalueRef<'tcx>) { + debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); + // Avoid generating stores of zero-sized values, because the only way to have a zero-sized + // value is through `undef`, and store itself is useless. + if dest.layout.is_zst() { + return; + } + match self { + OperandValue::Ref(r, source_align) => + base::memcpy_ty(bcx, dest.llval, r, dest.layout, + (source_align | dest.alignment).non_abi()), + OperandValue::Immediate(s) => { + bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi()); + } + OperandValue::Pair(a, b) => { + for (i, &x) in [a, b].iter().enumerate() { + let mut llptr = bcx.struct_gep(dest.llval, i as u64); + // Make sure to always store i1 as i8. + if common::val_ty(x) == Type::i1(bcx.ccx) { + llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx)); + } + bcx.store(base::from_immediate(bcx, x), llptr, dest.alignment.non_abi()); + } + } + } } +} - pub fn trans_consume(&mut self, - bcx: &Builder<'a, 'tcx>, - lvalue: &mir::Lvalue<'tcx>) - -> OperandRef<'tcx> +impl<'a, 'tcx> MirContext<'a, 'tcx> { + fn maybe_trans_consume_direct(&mut self, + bcx: &Builder<'a, 'tcx>, + lvalue: &mir::Lvalue<'tcx>) + -> Option> { - debug!("trans_consume(lvalue={:?})", lvalue); + debug!("maybe_trans_consume_direct(lvalue={:?})", lvalue); // watch out for locals that do not have an // alloca; they are handled somewhat differently if let mir::Lvalue::Local(index) = *lvalue { match self.locals[index] { LocalRef::Operand(Some(o)) => { - return o; + return Some(o); } LocalRef::Operand(None) => { bug!("use of {:?} before def", lvalue); @@ -253,33 +264,40 @@ pub fn trans_consume(&mut self, } } - // Moves out of pair fields are trivial. + // Moves out of scalar and scalar pair fields are trivial. 
if let &mir::Lvalue::Projection(ref proj) = lvalue { - if let mir::Lvalue::Local(index) = proj.base { - if let LocalRef::Operand(Some(o)) = self.locals[index] { - match (o.val, &proj.elem) { - (OperandValue::Pair(a, b), - &mir::ProjectionElem::Field(ref f, ty)) => { - let llval = [a, b][f.index()]; - let op = OperandRef { - val: OperandValue::Immediate(llval), - ty: self.monomorphize(&ty) - }; - - // Handle nested pairs. - return op.unpack_if_pair(bcx); - } - _ => {} - } + if let mir::ProjectionElem::Field(ref f, _) = proj.elem { + if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) { + return Some(o.extract_field(bcx, f.index())); } } } + None + } + + pub fn trans_consume(&mut self, + bcx: &Builder<'a, 'tcx>, + lvalue: &mir::Lvalue<'tcx>) + -> OperandRef<'tcx> + { + debug!("trans_consume(lvalue={:?})", lvalue); + + let ty = self.monomorphized_lvalue_ty(lvalue); + let layout = bcx.ccx.layout_of(ty); + + // ZSTs don't require any actual memory access. + if layout.is_zst() { + return OperandRef::new_zst(bcx.ccx, layout); + } + + if let Some(o) = self.maybe_trans_consume_direct(bcx, lvalue) { + return o; + } + // for most lvalues, to consume them we just load them // out from their home - let tr_lvalue = self.trans_lvalue(bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - self.trans_load(bcx, tr_lvalue.llval, tr_lvalue.alignment, ty) + self.trans_lvalue(bcx, lvalue).load(bcx) } pub fn trans_operand(&mut self, @@ -299,60 +317,11 @@ pub fn trans_operand(&mut self, let operand = val.to_operand(bcx.ccx); if let OperandValue::Ref(ptr, align) = operand.val { // If this is a OperandValue::Ref to an immediate constant, load it. - self.trans_load(bcx, ptr, align, operand.ty) + LvalueRef::new_sized(ptr, operand.layout, align).load(bcx) } else { operand } } } } - - pub fn store_operand(&mut self, - bcx: &Builder<'a, 'tcx>, - lldest: ValueRef, - align: Option, - operand: OperandRef<'tcx>) { - debug!("store_operand: operand={:?}, align={:?}", operand, align); - // Avoid generating stores of zero-sized values, because the only way to have a zero-sized - // value is through `undef`, and store itself is useless. - if common::type_is_zero_size(bcx.ccx, operand.ty) { - return; - } - match operand.val { - OperandValue::Ref(r, Alignment::Packed) => - base::memcpy_ty(bcx, lldest, r, operand.ty, Some(1)), - OperandValue::Ref(r, Alignment::AbiAligned) => - base::memcpy_ty(bcx, lldest, r, operand.ty, align), - OperandValue::Immediate(s) => { - bcx.store(base::from_immediate(bcx, s), lldest, align); - } - OperandValue::Pair(a, b) => { - let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(operand.ty) { - Layout::Univariant { ref variant, .. } => { - (adt::struct_llfields_index(variant, 0), - adt::struct_llfields_index(variant, 1), - if variant.packed { Some(1) } else { None }) - } - _ => (0, 1, align) - }; - - let a = base::from_immediate(bcx, a); - let b = base::from_immediate(bcx, b); - - // See comment above about zero-sized values. 
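// Standalone sketch, not rustc or patch code: the OperandValue::store path above widens
// i1 to i8 because a Rust bool is a one-bit immediate in LLVM but occupies a full byte
// in memory.
fn main() {
    assert_eq!(std::mem::size_of::<bool>(), 1); // in-memory representation is one byte
    let flag = true;
    assert_eq!(flag as u8, 1); // the byte value actually stored
}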
- let (a_zst, b_zst) = common::type_pair_fields(bcx.ccx, operand.ty) - .map_or((false, false), |[a_ty, b_ty]| { - (common::type_is_zero_size(bcx.ccx, a_ty), - common::type_is_zero_size(bcx.ccx, b_ty)) - }); - - if !a_zst { - bcx.store(a, bcx.struct_gep(lldest, ix0), f_align); - } - if !b_zst { - bcx.store(b, bcx.struct_gep(lldest, ix1), f_align); - } - } - } - } } diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index 7e187a85867cbd870f9c1418fa2c6405127c3cd8..4781425f491f8ef8978eb9626638bdf3d6633ead 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -11,8 +11,7 @@ use llvm::{self, ValueRef}; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{Layout, LayoutTyper}; -use rustc::mir::tcx::LvalueTy; +use rustc::ty::layout::{self, LayoutOf}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; @@ -22,14 +21,12 @@ use base; use builder::Builder; use callee; -use common::{self, val_ty, C_bool, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral}; +use common::{self, val_ty}; +use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_uint_big}; use consts; -use adt; -use machine; use monomorphize; use type_::Type; -use type_of; -use tvec; +use type_of::LayoutLlvmExt; use value::Value; use super::{MirContext, LocalRef}; @@ -52,18 +49,18 @@ pub fn trans_rvalue(&mut self, let tr_operand = self.trans_operand(&bcx, operand); // FIXME: consider not copying constants through stack. (fixable by translating // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand); + tr_operand.val.store(&bcx, dest); bcx } - mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => { - let cast_ty = self.monomorphize(&cast_ty); - - if common::type_is_fat_ptr(bcx.ccx, cast_ty) { + mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { + // The destination necessarily contains a fat pointer, so if + // it's a scalar pair, it's a fat pointer or newtype thereof. + if dest.layout.is_llvm_scalar_pair() { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp); + temp.val.store(&bcx, dest); return bcx; } @@ -72,10 +69,9 @@ pub fn trans_rvalue(&mut self, // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. let operand = self.trans_operand(&bcx, source); - let operand = operand.pack_if_pair(&bcx); - let llref = match operand.val { - OperandValue::Pair(..) => bug!(), - OperandValue::Immediate(llval) => { + match operand.val { + OperandValue::Pair(..) | + OperandValue::Immediate(_) => { // unsize from an immediate structure. We don't // really need a temporary alloca here, but // avoiding it would require us to have @@ -83,107 +79,94 @@ pub fn trans_rvalue(&mut self, // index into the struct, and this case isn't // important enough for it. 
debug!("trans_rvalue: creating ugly alloca"); - let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp"); - base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty); - scratch + let scratch = LvalueRef::alloca(&bcx, operand.layout, "__unsize_temp"); + scratch.storage_live(&bcx); + operand.val.store(&bcx, scratch); + base::coerce_unsized_into(&bcx, scratch, dest); + scratch.storage_dead(&bcx); } OperandValue::Ref(llref, align) => { - LvalueRef::new_sized_ty(llref, operand.ty, align) + let source = LvalueRef::new_sized(llref, operand.layout, align); + base::coerce_unsized_into(&bcx, source, dest); } - }; - base::coerce_unsized_into(&bcx, &llref, &dest); + } bcx } mir::Rvalue::Repeat(ref elem, count) => { - let dest_ty = dest.ty.to_ty(bcx.tcx()); + let tr_elem = self.trans_operand(&bcx, elem); - // No need to inizialize memory of a zero-sized slice - if common::type_is_zero_size(bcx.ccx, dest_ty) { + // Do not generate the loop for zero-sized elements or empty arrays. + if dest.layout.is_zst() { return bcx; } - let tr_elem = self.trans_operand(&bcx, elem); - let size = count.as_u64(); - let size = C_usize(bcx.ccx, size); - let base = base::get_dataptr(&bcx, dest.llval); - let align = dest.alignment.to_align(); + let start = dest.project_index(&bcx, C_usize(bcx.ccx, 0)).llval; if let OperandValue::Immediate(v) = tr_elem.val { + let align = dest.alignment.non_abi() + .unwrap_or(tr_elem.layout.align); + let align = C_i32(bcx.ccx, align.abi() as i32); + let size = C_usize(bcx.ccx, dest.layout.size.bytes()); + // Use llvm.memset.p0i8.* to initialize all zero arrays if common::is_const_integral(v) && common::const_to_uint(v) == 0 { - let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); - let align = C_i32(bcx.ccx, align as i32); - let ty = type_of::type_of(bcx.ccx, dest_ty); - let size = machine::llsize_of(bcx.ccx, ty); - let fill = C_uint(Type::i8(bcx.ccx), 0); - base::call_memset(&bcx, base, fill, size, align, false); + let fill = C_u8(bcx.ccx, 0); + base::call_memset(&bcx, start, fill, size, align, false); return bcx; } // Use llvm.memset.p0i8.* to initialize byte arrays + let v = base::from_immediate(&bcx, v); if common::val_ty(v) == Type::i8(bcx.ccx) { - let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); - let align = C_i32(bcx.ccx, align as i32); - base::call_memset(&bcx, base, v, size, align, false); + base::call_memset(&bcx, start, v, size, align, false); return bcx; } } - tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { - self.store_operand(bcx, llslot, align, tr_elem); - bcx.br(loop_bb); - }) + let count = count.as_u64(); + let count = C_usize(bcx.ccx, count); + let end = dest.project_index(&bcx, count).llval; + + let header_bcx = bcx.build_sibling_block("repeat_loop_header"); + let body_bcx = bcx.build_sibling_block("repeat_loop_body"); + let next_bcx = bcx.build_sibling_block("repeat_loop_next"); + + bcx.br(header_bcx.llbb()); + let current = header_bcx.phi(common::val_ty(start), &[start], &[bcx.llbb()]); + + let keep_going = header_bcx.icmp(llvm::IntNE, current, end); + header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); + + tr_elem.val.store(&body_bcx, + LvalueRef::new_sized(current, tr_elem.layout, dest.alignment)); + + let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]); + body_bcx.br(header_bcx.llbb()); + header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); + + next_bcx } mir::Rvalue::Aggregate(ref kind, ref operands) => { - match **kind { - 
mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => { - let discr = adt_def.discriminant_for_variant(bcx.tcx(), variant_index) - .to_u128_unchecked() as u64; - let dest_ty = dest.ty.to_ty(bcx.tcx()); - adt::trans_set_discr(&bcx, dest_ty, dest.llval, discr); - for (i, operand) in operands.iter().enumerate() { - let op = self.trans_operand(&bcx, operand); - // Do not generate stores and GEPis for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx, op.ty) { - let mut val = LvalueRef::new_sized( - dest.llval, dest.ty, dest.alignment); - let field_index = active_field_index.unwrap_or(i); - val.ty = LvalueTy::Downcast { - adt_def, - substs: self.monomorphize(&substs), - variant_index, - }; - let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index); - self.store_operand(&bcx, lldest_i, align.to_align(), op); - } - } - }, - _ => { - // If this is a tuple or closure, we need to translate GEP indices. - let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx())); - let get_memory_index = |i| { - if let Layout::Univariant { ref variant, .. } = *layout { - adt::struct_llfields_index(variant, i) - } else { - i - } - }; - let alignment = dest.alignment; - for (i, operand) in operands.iter().enumerate() { - let op = self.trans_operand(&bcx, operand); - // Do not generate stores and GEPis for zero-sized fields. - if !common::type_is_zero_size(bcx.ccx, op.ty) { - // Note: perhaps this should be StructGep, but - // note that in some cases the values here will - // not be structs but arrays. - let i = get_memory_index(i); - let dest = bcx.gepi(dest.llval, &[0, i]); - self.store_operand(&bcx, dest, alignment.to_align(), op); - } + let (dest, active_field_index) = match **kind { + mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => { + dest.trans_set_discr(&bcx, variant_index); + if adt_def.is_enum() { + (dest.project_downcast(&bcx, variant_index), active_field_index) + } else { + (dest, active_field_index) } } + _ => (dest, None) + }; + for (i, operand) in operands.iter().enumerate() { + let op = self.trans_operand(&bcx, operand); + // Do not generate stores and GEPis for zero-sized fields. 
+ if !op.layout.is_zst() { + let field_index = active_field_index.unwrap_or(i); + op.val.store(&bcx, dest.project_field(&bcx, field_index)); + } } bcx } @@ -191,7 +174,7 @@ pub fn trans_rvalue(&mut self, _ => { assert!(self.rvalue_creates_operand(rvalue)); let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue); - self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp); + temp.val.store(&bcx, dest); bcx } } @@ -205,32 +188,32 @@ pub fn trans_rvalue_operand(&mut self, assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); match *rvalue { - mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { + mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { let operand = self.trans_operand(&bcx, source); debug!("cast operand is {:?}", operand); - let cast_ty = self.monomorphize(&cast_ty); + let cast = bcx.ccx.layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { - match operand.ty.sty { + match operand.layout.ty.sty { ty::TyFnDef(def_id, substs) => { OperandValue::Immediate( callee::resolve_and_get_fn(bcx.ccx, def_id, substs)) } _ => { - bug!("{} cannot be reified to a fn ptr", operand.ty) + bug!("{} cannot be reified to a fn ptr", operand.layout.ty) } } } mir::CastKind::ClosureFnPointer => { - match operand.ty.sty { + match operand.layout.ty.sty { ty::TyClosure(def_id, substs) => { let instance = monomorphize::resolve_closure( bcx.ccx.tcx(), def_id, substs, ty::ClosureKind::FnOnce); OperandValue::Immediate(callee::get_fn(bcx.ccx, instance)) } _ => { - bug!("{} cannot be cast to a fn ptr", operand.ty) + bug!("{} cannot be cast to a fn ptr", operand.layout.ty) } } } @@ -239,26 +222,24 @@ pub fn trans_rvalue_operand(&mut self, operand.val } mir::CastKind::Unsize => { - // unsize targets other than to a fat pointer currently - // can't be operands. - assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty)); - + assert!(cast.is_llvm_scalar_pair()); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a // "trait-object-to-supertrait" coercion, for // example, // &'a fmt::Debug+Send => &'a fmt::Debug, - // So we need to pointercast the base to ensure - // the types match up. - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty); - let lldata = bcx.pointercast(lldata, llcast_ty); + + // HACK(eddyb) have to bitcast pointers + // until LLVM removes pointee types. + let lldata = bcx.pointercast(lldata, + cast.scalar_pair_element_llvm_type(bcx.ccx, 0)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { // "standard" unsize let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata, - operand.ty, cast_ty); + operand.layout.ty, cast.ty); OperandValue::Pair(lldata, llextra) } OperandValue::Ref(..) 
=> { @@ -267,20 +248,17 @@ pub fn trans_rvalue_operand(&mut self, } } } - mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => { - let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty); - let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty); - if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val { - if common::type_is_fat_ptr(bcx.ccx, cast_ty) { - let ll_cft = ll_cast_ty.field_types(); - let ll_fft = ll_from_ty.field_types(); - let data_cast = bcx.pointercast(data_ptr, ll_cft[0]); - assert_eq!(ll_cft[1].kind(), ll_fft[1].kind()); - OperandValue::Pair(data_cast, meta_ptr) + mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => { + if let OperandValue::Pair(data_ptr, meta) = operand.val { + if cast.is_llvm_scalar_pair() { + let data_cast = bcx.pointercast(data_ptr, + cast.scalar_pair_element_llvm_type(bcx.ccx, 0)); + OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llval = bcx.pointercast(data_ptr, ll_cast_ty); + let llcast_ty = cast.immediate_llvm_type(bcx.ccx); + let llval = bcx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } } else { @@ -288,30 +266,32 @@ pub fn trans_rvalue_operand(&mut self, } } mir::CastKind::Misc => { - debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty)); - let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); - let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); - let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty); - let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty); + assert!(cast.is_llvm_immediate()); + let r_t_in = CastTy::from_ty(operand.layout.ty) + .expect("bad input type for cast"); + let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); + let ll_t_in = operand.layout.immediate_llvm_type(bcx.ccx); + let ll_t_out = cast.immediate_llvm_type(bcx.ccx); let llval = operand.immediate(); - let l = bcx.ccx.layout_of(operand.ty); - let signed = if let Layout::CEnum { signed, min, max, .. } = *l { - if max > min { - // We want `table[e as usize]` to not - // have bound checks, and this is the most - // convenient place to put the `assume`. - - base::call_assume(&bcx, bcx.icmp( - llvm::IntULE, - llval, - C_uint(common::val_ty(llval), max) - )); - } - signed - } else { - operand.ty.is_signed() - }; + let mut signed = false; + if let layout::Abi::Scalar(ref scalar) = operand.layout.abi { + if let layout::Int(_, s) = scalar.value { + signed = s; + + if scalar.valid_range.end > scalar.valid_range.start { + // We want `table[e as usize]` to not + // have bound checks, and this is the most + // convenient place to put the `assume`. 
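// Standalone sketch, not rustc or patch code: the kind of indexing the valid-range
// `assume` emitted in this cast path is meant to help. If the backend knows the cast
// discriminant lies within the scalar's valid range, the bounds check in
// `table[e as usize]` can be elided. The enum and values are illustrative only.
enum Dir { North, East, South, West }
fn lookup(table: &[u8; 4], d: Dir) -> u8 {
    table[d as usize] // always within 0..4 for a valid Dir
}
fn main() {
    assert_eq!(lookup(&[10, 20, 30, 40], Dir::South), 30);
}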
+ + base::call_assume(&bcx, bcx.icmp( + llvm::IntULE, + llval, + C_uint_big(ll_t_in, scalar.valid_range.end) + )); + } + } + } let newval = match (r_t_in, r_t_out) { (CastTy::Int(_), CastTy::Int(_)) => { @@ -343,49 +323,43 @@ pub fn trans_rvalue_operand(&mut self, cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(_)) => cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out), - _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty) + _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty) }; OperandValue::Immediate(newval) } }; - let operand = OperandRef { + (bcx, OperandRef { val, - ty: cast_ty - }; - (bcx, operand) + layout: cast + }) } mir::Rvalue::Ref(_, bk, ref lvalue) => { let tr_lvalue = self.trans_lvalue(&bcx, lvalue); - let ty = tr_lvalue.ty.to_ty(bcx.tcx()); - let ref_ty = bcx.tcx().mk_ref( - bcx.tcx().types.re_erased, - ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() } - ); + let ty = tr_lvalue.layout.ty; // Note: lvalues are indirect, so storing the `llval` into the // destination effectively creates a reference. - let operand = if !bcx.ccx.shared().type_has_metadata(ty) { - OperandRef { - val: OperandValue::Immediate(tr_lvalue.llval), - ty: ref_ty, - } + let val = if !bcx.ccx.shared().type_has_metadata(ty) { + OperandValue::Immediate(tr_lvalue.llval) } else { - OperandRef { - val: OperandValue::Pair(tr_lvalue.llval, - tr_lvalue.llextra), - ty: ref_ty, - } + OperandValue::Pair(tr_lvalue.llval, tr_lvalue.llextra) }; - (bcx, operand) + (bcx, OperandRef { + val, + layout: self.ccx.layout_of(self.ccx.tcx().mk_ref( + self.ccx.tcx().types.re_erased, + ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() } + )), + }) } mir::Rvalue::Len(ref lvalue) => { let size = self.evaluate_array_len(&bcx, lvalue); let operand = OperandRef { val: OperandValue::Immediate(size), - ty: bcx.tcx().types.usize, + layout: bcx.ccx.layout_of(bcx.tcx().types.usize), }; (bcx, operand) } @@ -393,26 +367,26 @@ pub fn trans_rvalue_operand(&mut self, mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { let lhs = self.trans_operand(&bcx, lhs); let rhs = self.trans_operand(&bcx, rhs); - let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) { - match (lhs.val, rhs.val) { - (OperandValue::Pair(lhs_addr, lhs_extra), - OperandValue::Pair(rhs_addr, rhs_extra)) => { - self.trans_fat_ptr_binop(&bcx, op, - lhs_addr, lhs_extra, - rhs_addr, rhs_extra, - lhs.ty) - } - _ => bug!() + let llresult = match (lhs.val, rhs.val) { + (OperandValue::Pair(lhs_addr, lhs_extra), + OperandValue::Pair(rhs_addr, rhs_extra)) => { + self.trans_fat_ptr_binop(&bcx, op, + lhs_addr, lhs_extra, + rhs_addr, rhs_extra, + lhs.layout.ty) } - } else { - self.trans_scalar_binop(&bcx, op, - lhs.immediate(), rhs.immediate(), - lhs.ty) + (OperandValue::Immediate(lhs_val), + OperandValue::Immediate(rhs_val)) => { + self.trans_scalar_binop(&bcx, op, lhs_val, rhs_val, lhs.layout.ty) + } + + _ => bug!() }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty), + layout: bcx.ccx.layout_of( + op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bcx, operand) } @@ -421,12 +395,12 @@ pub fn trans_rvalue_operand(&mut self, let rhs = self.trans_operand(&bcx, rhs); let result = self.trans_scalar_checked_binop(&bcx, op, lhs.immediate(), rhs.immediate(), - lhs.ty); - let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty); + lhs.layout.ty); + let val_ty = op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty); let operand_ty = bcx.tcx().intern_tup(&[val_ty, 
bcx.tcx().types.bool], false); let operand = OperandRef { val: result, - ty: operand_ty + layout: bcx.ccx.layout_of(operand_ty) }; (bcx, operand) @@ -435,7 +409,7 @@ pub fn trans_rvalue_operand(&mut self, mir::Rvalue::UnaryOp(op, ref operand) => { let operand = self.trans_operand(&bcx, operand); let lloperand = operand.immediate(); - let is_float = operand.ty.is_fp(); + let is_float = operand.layout.ty.is_fp(); let llval = match op { mir::UnOp::Not => bcx.not(lloperand), mir::UnOp::Neg => if is_float { @@ -446,47 +420,43 @@ pub fn trans_rvalue_operand(&mut self, }; (bcx, OperandRef { val: OperandValue::Immediate(llval), - ty: operand.ty, + layout: operand.layout, }) } mir::Rvalue::Discriminant(ref lvalue) => { - let discr_lvalue = self.trans_lvalue(&bcx, lvalue); - let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx()); let discr_ty = rvalue.ty(&*self.mir, bcx.tcx()); - let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty); - let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval, - discr_lvalue.alignment, Some(discr_type), true); + let discr = self.trans_lvalue(&bcx, lvalue) + .trans_get_discr(&bcx, discr_ty); (bcx, OperandRef { val: OperandValue::Immediate(discr), - ty: discr_ty + layout: self.ccx.layout_of(discr_ty) }) } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bcx.ccx.shared().type_is_sized(ty)); - let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty)); + let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty).bytes()); let tcx = bcx.tcx(); (bcx, OperandRef { val: OperandValue::Immediate(val), - ty: tcx.types.usize, + layout: self.ccx.layout_of(tcx.types.usize), }) } mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let llty = type_of::type_of(bcx.ccx, content_ty); - let llsize = machine::llsize_of(bcx.ccx, llty); - let align = bcx.ccx.align_of(content_ty); - let llalign = C_usize(bcx.ccx, align as u64); - let llty_ptr = llty.ptr_to(); - let box_ty = bcx.tcx().mk_box(content_ty); + let (size, align) = bcx.ccx.size_and_align_of(content_ty); + let llsize = C_usize(bcx.ccx, size.bytes()); + let llalign = C_usize(bcx.ccx, align.abi()); + let box_layout = bcx.ccx.layout_of(bcx.tcx().mk_box(content_ty)); + let llty_ptr = box_layout.llvm_type(bcx.ccx); // Allocate space: let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s)); + bcx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bcx.tcx(), def_id); @@ -495,7 +465,7 @@ pub fn trans_rvalue_operand(&mut self, let operand = OperandRef { val: OperandValue::Immediate(val), - ty: box_ty, + layout: box_layout, }; (bcx, operand) } @@ -508,7 +478,8 @@ pub fn trans_rvalue_operand(&mut self, // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. let ty = rvalue.ty(self.mir, self.ccx.tcx()); - (bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty))) + (bcx, OperandRef::new_zst(self.ccx, + self.ccx.layout_of(self.monomorphize(&ty)))) } } } @@ -521,11 +492,9 @@ fn evaluate_array_len(&mut self, // because trans_lvalue() panics if Local is operand. 
if let mir::Lvalue::Local(index) = *lvalue { if let LocalRef::Operand(Some(op)) = self.locals[index] { - if common::type_is_zero_size(bcx.ccx, op.ty) { - if let ty::TyArray(_, n) = op.ty.sty { - let n = n.val.to_const_int().unwrap().to_u64().unwrap(); - return common::C_usize(bcx.ccx, n); - } + if let ty::TyArray(_, n) = op.layout.ty.sty { + let n = n.val.to_const_int().unwrap().to_u64().unwrap(); + return common::C_usize(bcx.ccx, n); } } } @@ -730,7 +699,7 @@ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { mir::Rvalue::Aggregate(..) => { let ty = rvalue.ty(self.mir, self.ccx.tcx()); let ty = self.monomorphize(&ty); - common::type_is_zero_size(self.ccx, ty) + self.ccx.layout_of(ty).is_zst() } } @@ -830,7 +799,7 @@ fn cast_int_to_float(bcx: &Builder, if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. - let max = C_big_integral(int_ty, MAX_F32_PLUS_HALF_ULP); + let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); let overflow = bcx.icmp(llvm::IntUGE, x, max); let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32); let infinity = consts::bitcast(infinity_bits, float_ty); @@ -957,8 +926,8 @@ fn int_min(signed: bool, int_ty: Type) -> i128 { // performed is ultimately up to the backend, but at least x86 does perform them. let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min); let greater = bcx.fcmp(llvm::RealOGT, x, f_max); - let int_max = C_big_integral(int_ty, int_max(signed, int_ty)); - let int_min = C_big_integral(int_ty, int_min(signed, int_ty) as u128); + let int_max = C_uint_big(int_ty, int_max(signed, int_ty)); + let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128); let s0 = bcx.select(less_or_nan, int_min, fptosui_result); let s1 = bcx.select(greater, int_max, s0); diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index bbf661ae9a735d41c994cbe8ab018334f12386cb..607ecd887fa7823be86285c0086af2f3c505d30d 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -10,14 +10,11 @@ use rustc::mir; -use base; use asm; -use common; use builder::Builder; use super::MirContext; use super::LocalRef; -use super::super::adt; impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn trans_statement(&mut self, @@ -39,18 +36,16 @@ pub fn trans_statement(&mut self, self.locals[index] = LocalRef::Operand(Some(operand)); bcx } - LocalRef::Operand(Some(_)) => { - let ty = self.monomorphized_lvalue_ty(lvalue); - - if !common::type_is_zero_size(bcx.ccx, ty) { + LocalRef::Operand(Some(op)) => { + if !op.layout.is_zst() { span_bug!(statement.source_info.span, "operand {:?} already assigned", rvalue); - } else { - // If the type is zero-sized, it's already been set here, - // but we still need to make sure we translate the operand - self.trans_rvalue_operand(bcx, rvalue).0 } + + // If the type is zero-sized, it's already been set here, + // but we still need to make sure we translate the operand + self.trans_rvalue_operand(bcx, rvalue).0 } } } else { @@ -59,24 +54,25 @@ pub fn trans_statement(&mut self, } } mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => { - let ty = self.monomorphized_lvalue_ty(lvalue); - let lvalue_transed = self.trans_lvalue(&bcx, lvalue); - adt::trans_set_discr(&bcx, - ty, - lvalue_transed.llval, - variant_index as u64); + self.trans_lvalue(&bcx, lvalue) + .trans_set_discr(&bcx, variant_index); bcx } mir::StatementKind::StorageLive(local) => { - 
self.trans_storage_liveness(bcx, local, base::Lifetime::Start) + if let LocalRef::Lvalue(tr_lval) = self.locals[local] { + tr_lval.storage_live(&bcx); + } + bcx } mir::StatementKind::StorageDead(local) => { - self.trans_storage_liveness(bcx, local, base::Lifetime::End) + if let LocalRef::Lvalue(tr_lval) = self.locals[local] { + tr_lval.storage_dead(&bcx); + } + bcx } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { - let lvalue = self.trans_lvalue(&bcx, output); - (lvalue.llval, lvalue.ty.to_ty(bcx.tcx())) + self.trans_lvalue(&bcx, output) }).collect(); let input_vals = inputs.iter().map(|input| { @@ -91,15 +87,4 @@ pub fn trans_statement(&mut self, mir::StatementKind::Nop => bcx, } } - - fn trans_storage_liveness(&self, - bcx: Builder<'a, 'tcx>, - index: mir::Local, - intrinsic: base::Lifetime) - -> Builder<'a, 'tcx> { - if let LocalRef::Lvalue(tr_lval) = self.locals[index] { - intrinsic.call(&bcx, tr_lval.llval); - } - bcx - } } diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs index fb68be293a79e13083fec93a3b29460cddf98b60..991f99e0f6c9963c74ddc68a75aa6468332d3855 100644 --- a/src/librustc_trans/trans_item.rs +++ b/src/librustc_trans/trans_item.rs @@ -23,14 +23,15 @@ use declare; use llvm; use monomorphize::Instance; +use type_of::LayoutLlvmExt; use rustc::hir; use rustc::middle::trans::{Linkage, Visibility}; use rustc::ty::{self, TyCtxt, TypeFoldable}; +use rustc::ty::layout::LayoutOf; use syntax::ast; use syntax::attr; use syntax_pos::Span; use syntax_pos::symbol::Symbol; -use type_of; use std::fmt; pub use rustc::middle::trans::TransItem; @@ -173,7 +174,7 @@ fn predefine_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, let def_id = ccx.tcx().hir.local_def_id(node_id); let instance = Instance::mono(ccx.tcx(), def_id); let ty = common::instance_ty(ccx.tcx(), &instance); - let llty = type_of::type_of(ccx, ty); + let llty = ccx.layout_of(ty).llvm_type(ccx); let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| { ccx.sess().span_fatal(ccx.tcx().hir.span(node_id), diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs deleted file mode 100644 index da4a4e55a67f4819356305a7b692979f8006b1af..0000000000000000000000000000000000000000 --- a/src/librustc_trans/tvec.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use llvm; -use builder::Builder; -use llvm::{BasicBlockRef, ValueRef}; -use common::*; -use rustc::ty::Ty; - -pub fn slice_for_each<'a, 'tcx, F>( - bcx: &Builder<'a, 'tcx>, - data_ptr: ValueRef, - unit_ty: Ty<'tcx>, - len: ValueRef, - f: F -) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef, BasicBlockRef) { - // Special-case vectors with elements of size 0 so they don't go out of bounds (#9890) - let zst = type_is_zero_size(bcx.ccx, unit_ty); - let add = |bcx: &Builder, a, b| if zst { - bcx.add(a, b) - } else { - bcx.inbounds_gep(a, &[b]) - }; - - let body_bcx = bcx.build_sibling_block("slice_loop_body"); - let header_bcx = bcx.build_sibling_block("slice_loop_header"); - let next_bcx = bcx.build_sibling_block("slice_loop_next"); - - let start = if zst { - C_usize(bcx.ccx, 1) - } else { - data_ptr - }; - let end = add(&bcx, start, len); - - bcx.br(header_bcx.llbb()); - let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]); - - let keep_going = header_bcx.icmp(llvm::IntNE, current, end); - header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb()); - - let next = add(&body_bcx, current, C_usize(bcx.ccx, 1)); - f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb()); - header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb()); - next_bcx -} diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index ffb303688aaf66747bae9a5af92d4b220d537d40..02224858b4692acb515d90e186cf794d0da1fc2b 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -17,7 +17,7 @@ use context::CrateContext; use syntax::ast; -use rustc::ty::layout; +use rustc::ty::layout::{self, Align}; use std::ffi::CString; use std::fmt; @@ -66,10 +66,6 @@ pub fn void(ccx: &CrateContext) -> Type { ty!(llvm::LLVMVoidTypeInContext(ccx.llcx())) } - pub fn nil(ccx: &CrateContext) -> Type { - Type::empty_struct(ccx) - } - pub fn metadata(ccx: &CrateContext) -> Type { ty!(llvm::LLVMRustMetadataTypeInContext(ccx.llcx())) } @@ -202,9 +198,6 @@ pub fn named_struct(ccx: &CrateContext, name: &str) -> Type { ty!(llvm::LLVMStructCreateNamed(ccx.llcx(), name.as_ptr())) } - pub fn empty_struct(ccx: &CrateContext) -> Type { - Type::struct_(ccx, &[], false) - } pub fn array(ty: &Type, len: u64) -> Type { ty!(llvm::LLVMRustArrayType(ty.to_ref(), len)) @@ -214,20 +207,6 @@ pub fn vector(ty: &Type, len: u64) -> Type { ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint)) } - pub fn vec(ccx: &CrateContext, ty: &Type) -> Type { - Type::struct_(ccx, - &[Type::array(ty, 0), Type::isize(ccx)], - false) - } - - pub fn opaque_vec(ccx: &CrateContext) -> Type { - Type::vec(ccx, &Type::i8(ccx)) - } - - pub fn vtable_ptr(ccx: &CrateContext) -> Type { - Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to() - } - pub fn kind(&self) -> TypeKind { unsafe { llvm::LLVMRustGetTypeKind(self.to_ref()) @@ -259,19 +238,6 @@ pub fn vector_length(&self) -> usize { } } - pub fn field_types(&self) -> Vec { - unsafe { - let n_elts = llvm::LLVMCountStructElementTypes(self.to_ref()) as usize; - if n_elts == 0 { - return Vec::new(); - } - let mut elts = vec![Type { rf: ptr::null_mut() }; n_elts]; - llvm::LLVMGetStructElementTypes(self.to_ref(), - elts.as_mut_ptr() as *mut TypeRef); - elts - } - } - pub fn func_params(&self) -> Vec { unsafe { let n_args = llvm::LLVMCountParamTypes(self.to_ref()) as usize; @@ -302,7 +268,6 @@ pub fn int_width(&self) -> u64 { pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type { use rustc::ty::layout::Integer::*; match i { - I1 => 
Type::i1(cx), I8 => Type::i8(cx), I16 => Type::i16(cx), I32 => Type::i32(cx), @@ -310,4 +275,15 @@ pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type { I128 => Type::i128(cx), } } + + /// Return a LLVM type that has at most the required alignment, + /// as a conservative approximation for unknown pointee types. + pub fn pointee_for_abi_align(ccx: &CrateContext, align: Align) -> Type { + if let Some(ity) = layout::Integer::for_abi_align(ccx, align) { + Type::from_integer(ccx, ity) + } else { + // FIXME(eddyb) We could find a better approximation here. + Type::i8(ccx) + } + } } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index cac09a81361f09651edc8c4ff7aa09b88ba37adb..9b32c825117ee32d14ee1fd78d90503db554dffb 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -9,231 +9,484 @@ // except according to those terms. use abi::FnType; -use adt; use common::*; -use machine; +use rustc::hir; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::LayoutTyper; +use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; +use rustc_back::PanicStrategy; use trans_item::DefPathBasedNames; use type_::Type; -use syntax::ast; +use std::fmt::Write; -pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - match ty.sty { - ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | - ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if ccx.shared().type_has_metadata(t) => { - in_memory_type_of(ccx, t).ptr_to() +fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>, + defer: &mut Option<(Type, TyLayout<'tcx>)>) + -> Type { + match layout.abi { + layout::Abi::Scalar(_) => bug!("handled elsewhere"), + layout::Abi::Vector => { + return Type::vector(&layout.field(ccx, 0).llvm_type(ccx), + layout.fields.count() as u64); } - ty::TyAdt(def, _) if def.is_box() => { - in_memory_type_of(ccx, ty.boxed_ty()).ptr_to() + layout::Abi::ScalarPair(..) => { + return Type::struct_(ccx, &[ + layout.scalar_pair_element_llvm_type(ccx, 0), + layout.scalar_pair_element_llvm_type(ccx, 1), + ], false); } - _ => bug!("expected fat ptr ty but got {:?}", ty) + layout::Abi::Uninhabited | + layout::Abi::Aggregate { .. } => {} } -} -pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - let unsized_part = ccx.tcx().struct_tail(ty); - match unsized_part.sty { - ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => { - Type::uint_from_ty(ccx, ast::UintTy::Us) + let name = match layout.ty.sty { + ty::TyClosure(..) | + ty::TyGenerator(..) | + ty::TyAdt(..) | + ty::TyDynamic(..) | + ty::TyForeign(..) | + ty::TyStr => { + let mut name = String::with_capacity(32); + let printer = DefPathBasedNames::new(ccx.tcx(), true, true); + printer.push_type_name(layout.ty, &mut name); + match (&layout.ty.sty, &layout.variants) { + (&ty::TyAdt(def, _), &layout::Variants::Single { index }) => { + if def.is_enum() && !def.variants.is_empty() { + write!(&mut name, "::{}", def.variants[index].name).unwrap(); + } + } + _ => {} + } + Some(name) + } + _ => None + }; + + match layout.fields { + layout::FieldPlacement::Union(_) => { + let size = layout.size.bytes(); + let fill = Type::array(&Type::i8(ccx), size); + match name { + None => { + Type::struct_(ccx, &[fill], layout.is_packed()) + } + Some(ref name) => { + let mut llty = Type::named_struct(ccx, name); + llty.set_struct_body(&[fill], layout.is_packed()); + llty + } + } + } + layout::FieldPlacement::Array { count, .. 
} => { + Type::array(&layout.field(ccx, 0).llvm_type(ccx), count) + } + layout::FieldPlacement::Arbitrary { .. } => { + match name { + None => { + Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed()) + } + Some(ref name) => { + let llty = Type::named_struct(ccx, name); + *defer = Some((llty, layout)); + llty + } + } } - ty::TyDynamic(..) => Type::vtable_ptr(ccx), - _ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}", - unsized_part, ty) } } -pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - if t.is_bool() { - Type::i1(cx) +fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>) -> Vec { + debug!("struct_llfields: {:#?}", layout); + let field_count = layout.fields.count(); + + let mut offset = Size::from_bytes(0); + let mut result: Vec = Vec::with_capacity(1 + field_count * 2); + for i in layout.fields.index_by_increasing_offset() { + let field = layout.field(ccx, i); + let target_offset = layout.fields.offset(i as usize); + debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", + i, field, offset, target_offset); + assert!(target_offset >= offset); + let padding = target_offset - offset; + result.push(Type::array(&Type::i8(ccx), padding.bytes())); + debug!(" padding before: {:?}", padding); + + result.push(field.llvm_type(ccx)); + + if layout.is_packed() { + assert_eq!(padding.bytes(), 0); + } else { + assert!(field.align.abi() <= layout.align.abi(), + "non-packed type has field with larger align ({}): {:#?}", + field.align.abi(), layout); + } + + offset = target_offset + field.size; + } + if !layout.is_unsized() && field_count > 0 { + if offset > layout.size { + bug!("layout: {:#?} stride: {:?} offset: {:?}", + layout, layout.size, offset); + } + let padding = layout.size - offset; + debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", + padding, offset, layout.size); + result.push(Type::array(&Type::i8(ccx), padding.bytes())); + assert!(result.len() == 1 + field_count * 2); } else { - type_of(cx, t) + debug!("struct_llfields: offset: {:?} stride: {:?}", + offset, layout.size); } -} -/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. -/// This is the right LLVM type for an alloca containing a value of that type, -/// and the pointee of an Lvalue Datum (which is always a LLVM pointer). -/// For unsized types, the returned type is a fat pointer, thus the resulting -/// LLVM type for a `Trait` Lvalue is `{ i8*, void(i8*)** }*`, which is a double -/// indirection to the actual data, unlike a `i8` Lvalue, which is just `i8*`. -/// This is needed due to the treatment of immediate values, as a fat pointer -/// is too large for it to be placed in SSA value (by our rules). -/// For the raw type without far pointer indirection, see `in_memory_type_of`. -pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - let ty = if cx.shared().type_has_metadata(ty) { - cx.tcx().mk_imm_ptr(ty) - } else { - ty - }; - in_memory_type_of(cx, ty) + result } -/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. -/// This is the right LLVM type for a field/array element of that type, -/// and is the same as `type_of` for all Sized types. -/// Unsized types, however, are represented by a "minimal unit", e.g. -/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this -/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. 
-/// If the type is an unsized struct, the regular layout is generated, -/// with the inner-most trailing unsized field using the "minimal unit" -/// of that field's type - this is useful for taking the address of -/// that field and ensuring the struct has the right alignment. -/// For the LLVM type of a value as a whole, see `type_of`. -pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { - // Check the cache. - if let Some(&llty) = cx.lltypes().borrow().get(&t) { - return llty; +impl<'a, 'tcx> CrateContext<'a, 'tcx> { + pub fn align_of(&self, ty: Ty<'tcx>) -> Align { + self.layout_of(ty).align } - debug!("type_of {:?}", t); + pub fn size_of(&self, ty: Ty<'tcx>) -> Size { + self.layout_of(ty).size + } - assert!(!t.has_escaping_regions(), "{:?} has escaping regions", t); + pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { + self.layout_of(ty).size_and_align() + } +} + +#[derive(Copy, Clone, PartialEq, Eq)] +pub enum PointerKind { + /// Most general case, we know no restrictions to tell LLVM. + Shared, - // Replace any typedef'd types with their equivalent non-typedef - // type. This ensures that all LLVM nominal types that contain - // Rust types are defined as the same LLVM types. If we don't do - // this then, e.g. `Option<{myfield: bool}>` would be a different - // type than `Option`. - let t_norm = cx.tcx().erase_regions(&t); + /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`. + Frozen, + + /// `&mut T`, when we know `noalias` is safe for LLVM. + UniqueBorrowed, + + /// `Box`, unlike `UniqueBorrowed`, it also has `noalias` on returns. + UniqueOwned +} - if t != t_norm { - let llty = in_memory_type_of(cx, t_norm); - debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty); - cx.lltypes().borrow_mut().insert(t, llty); - return llty; +#[derive(Copy, Clone)] +pub struct PointeeInfo { + pub size: Size, + pub align: Align, + pub safe: Option, +} + +pub trait LayoutLlvmExt<'tcx> { + fn is_llvm_immediate(&self) -> bool; + fn is_llvm_scalar_pair<'a>(&self) -> bool; + fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; + fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; + fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, + index: usize) -> Type; + fn llvm_field_index(&self, index: usize) -> u64; + fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size) + -> Option; +} + +impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { + fn is_llvm_immediate(&self) -> bool { + match self.abi { + layout::Abi::Uninhabited | + layout::Abi::Scalar(_) | + layout::Abi::Vector => true, + layout::Abi::ScalarPair(..) => false, + layout::Abi::Aggregate { .. } => self.is_zst() + } } - let ptr_ty = |ty: Ty<'tcx>| { - if cx.shared().type_has_metadata(ty) { - if let ty::TyStr = ty.sty { - // This means we get a nicer name in the output (str is always - // unsized). - cx.str_slice_type() - } else { - let ptr_ty = in_memory_type_of(cx, ty).ptr_to(); - let info_ty = unsized_info_ty(cx, ty); - Type::struct_(cx, &[ptr_ty, info_ty], false) + fn is_llvm_scalar_pair<'a>(&self) -> bool { + match self.abi { + layout::Abi::ScalarPair(..) => true, + layout::Abi::Uninhabited | + layout::Abi::Scalar(_) | + layout::Abi::Vector | + layout::Abi::Aggregate { .. } => false + } + } + + /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. + /// The pointee type of the pointer in `LvalueRef` is always this type. 
+ /// For sized types, it is also the right LLVM type for an `alloca` + /// containing a value of that type, and most immediates (except `bool`). + /// Unsized types, however, are represented by a "minimal unit", e.g. + /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this + /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`. + /// If the type is an unsized struct, the regular layout is generated, + /// with the inner-most trailing unsized field using the "minimal unit" + /// of that field's type - this is useful for taking the address of + /// that field and ensuring the struct has the right alignment. + fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { + if let layout::Abi::Scalar(ref scalar) = self.abi { + // Use a different cache for scalars because pointers to DSTs + // can be either fat or thin (data pointers of fat pointers). + if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) { + return llty; } - } else { - in_memory_type_of(cx, ty).ptr_to() + let llty = match scalar.value { + layout::Int(i, _) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => { + let pointee = match self.ty.sty { + ty::TyRef(_, ty::TypeAndMut { ty, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => { + ccx.layout_of(ty).llvm_type(ccx) + } + ty::TyAdt(def, _) if def.is_box() => { + ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx) + } + ty::TyFnPtr(sig) => { + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); + FnType::new(ccx, sig, &[]).llvm_type(ccx) + } + _ => { + // If we know the alignment, pick something better than i8. + if let Some(pointee) = self.pointee_info_at(ccx, Size::from_bytes(0)) { + Type::pointee_for_abi_align(ccx, pointee.align) + } else { + Type::i8(ccx) + } + } + }; + pointee.ptr_to() + } + }; + ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty); + return llty; } - }; - let mut llty = match t.sty { - ty::TyBool => Type::bool(cx), - ty::TyChar => Type::char(cx), - ty::TyInt(t) => Type::int_from_ty(cx, t), - ty::TyUint(t) => Type::uint_from_ty(cx, t), - ty::TyFloat(t) => Type::float_from_ty(cx, t), - ty::TyNever => Type::nil(cx), - ty::TyClosure(..) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. - adt::incomplete_type_of(cx, t, "closure") - } - ty::TyGenerator(..) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. - adt::incomplete_type_of(cx, t, "generator") - } - - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - ptr_ty(ty) - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_ty(t.boxed_ty()) - } - - ty::TyArray(ty, size) => { - let llty = in_memory_type_of(cx, ty); - let size = size.val.to_const_int().unwrap().to_u64().unwrap(); - Type::array(&llty, size) - } - - // Unsized slice types (and str) have the type of their element, and - // traits have the type of u8. This is so that the data pointer inside - // fat pointers is of the right type (e.g. for array accesses), even - // when taking the address of an unsized field in a struct. - ty::TySlice(ty) => in_memory_type_of(cx, ty), - ty::TyStr | ty::TyDynamic(..) | ty::TyForeign(..) => Type::i8(cx), - - ty::TyFnDef(..) 
=> Type::nil(cx), - ty::TyFnPtr(sig) => { - let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig); - FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() - } - ty::TyTuple(ref tys, _) if tys.is_empty() => Type::nil(cx), - ty::TyTuple(..) => { - adt::type_of(cx, t) - } - ty::TyAdt(..) if t.is_simd() => { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = in_memory_type_of(cx, e); - let n = t.simd_size(cx.tcx()) as u64; - Type::vector(&llet, n) - } - ty::TyAdt(..) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. This - // avoids creating more than one copy of the enum when one - // of the enum's variants refers to the enum itself. - let name = llvm_type_name(cx, t); - adt::incomplete_type_of(cx, t, &name[..]) - } - - ty::TyInfer(..) | - ty::TyProjection(..) | - ty::TyParam(..) | - ty::TyAnon(..) | - ty::TyError => bug!("type_of with {:?}", t), - }; - debug!("--> mapped t={:?} to llty={:?}", t, llty); + // Check the cache. + let variant_index = match self.variants { + layout::Variants::Single { index } => Some(index), + _ => None + }; + if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, variant_index)) { + return llty; + } + + debug!("llvm_type({:#?})", self); - cx.lltypes().borrow_mut().insert(t, llty); + assert!(!self.ty.has_escaping_regions(), "{:?} has escaping regions", self.ty); - // If this was an enum or struct, fill in the type now. - match t.sty { - ty::TyAdt(..) | ty::TyClosure(..) | ty::TyGenerator(..) if !t.is_simd() && !t.is_box() => { - adt::finish_type_of(cx, t, &mut llty); + // Make sure lifetimes are erased, to avoid generating distinct LLVM + // types for Rust types that only differ in the choice of lifetimes. + let normal_ty = ccx.tcx().erase_regions(&self.ty); + + let mut defer = None; + let llty = if self.ty != normal_ty { + let mut layout = ccx.layout_of(normal_ty); + if let Some(v) = variant_index { + layout = layout.for_variant(ccx, v); + } + layout.llvm_type(ccx) + } else { + uncached_llvm_type(ccx, *self, &mut defer) + }; + debug!("--> mapped {:#?} to llty={:?}", self, llty); + + ccx.lltypes().borrow_mut().insert((self.ty, variant_index), llty); + + if let Some((mut llty, layout)) = defer { + llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed()) } - _ => () - } - llty -} + llty + } -impl<'a, 'tcx> CrateContext<'a, 'tcx> { - pub fn align_of(&self, ty: Ty<'tcx>) -> machine::llalign { - self.layout_of(ty).align(self).abi() as machine::llalign + fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { + if let layout::Abi::Scalar(ref scalar) = self.abi { + if scalar.is_bool() { + return Type::i1(ccx); + } + } + self.llvm_type(ccx) } - pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize { - self.layout_of(ty).size(self).bytes() as machine::llsize + fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>, + index: usize) -> Type { + // HACK(eddyb) special-case fat pointers until LLVM removes + // pointee types, to avoid bitcasting every `OperandRef::deref`. + match self.ty.sty { + ty::TyRef(..) 
| + ty::TyRawPtr(_) => { + return self.field(ccx, index).llvm_type(ccx); + } + ty::TyAdt(def, _) if def.is_box() => { + let ptr_ty = ccx.tcx().mk_mut_ptr(self.ty.boxed_ty()); + return ccx.layout_of(ptr_ty).scalar_pair_element_llvm_type(ccx, index); + } + _ => {} + } + + let (a, b) = match self.abi { + layout::Abi::ScalarPair(ref a, ref b) => (a, b), + _ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self) + }; + let scalar = [a, b][index]; + + // Make sure to return the same type `immediate_llvm_type` would, + // to avoid dealing with two types and the associated conversions. + // This means that `(bool, bool)` is represented as `{i1, i1}`, + // both in memory and as an immediate, while `bool` is typically + // `i8` in memory and only `i1` when immediate. While we need to + // load/store `bool` as `i8` to avoid crippling LLVM optimizations, + // `i1` in a LLVM aggregate is valid and mostly equivalent to `i8`. + if scalar.is_bool() { + return Type::i1(ccx); + } + + match scalar.value { + layout::Int(i, _) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => { + // If we know the alignment, pick something better than i8. + let offset = if index == 0 { + Size::from_bytes(0) + } else { + a.value.size(ccx).abi_align(b.value.align(ccx)) + }; + let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) { + Type::pointee_for_abi_align(ccx, pointee.align) + } else { + Type::i8(ccx) + }; + pointee.ptr_to() + } + } } - pub fn over_align_of(&self, t: Ty<'tcx>) - -> Option { - let layout = self.layout_of(t); - if let Some(align) = layout.over_align(&self.tcx().data_layout) { - Some(align as machine::llalign) - } else { - None + fn llvm_field_index(&self, index: usize) -> u64 { + match self.abi { + layout::Abi::Scalar(_) | + layout::Abi::ScalarPair(..) => { + bug!("TyLayout::llvm_field_index({:?}): not applicable", self) + } + _ => {} + } + match self.fields { + layout::FieldPlacement::Union(_) => { + bug!("TyLayout::llvm_field_index({:?}): not applicable", self) + } + + layout::FieldPlacement::Array { .. } => { + index as u64 + } + + layout::FieldPlacement::Arbitrary { .. 
} => { + 1 + (self.fields.memory_index(index) as u64) * 2 + } } } -} -fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String { - let mut name = String::with_capacity(32); - let printer = DefPathBasedNames::new(cx.tcx(), true, true); - printer.push_type_name(ty, &mut name); - name + fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size) + -> Option { + if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) { + return pointee; + } + + let mut result = None; + match self.ty.sty { + ty::TyRawPtr(mt) if offset.bytes() == 0 => { + let (size, align) = ccx.size_and_align_of(mt.ty); + result = Some(PointeeInfo { + size, + align, + safe: None + }); + } + + ty::TyRef(_, mt) if offset.bytes() == 0 => { + let (size, align) = ccx.size_and_align_of(mt.ty); + + let kind = match mt.mutbl { + hir::MutImmutable => if ccx.shared().type_is_freeze(mt.ty) { + PointerKind::Frozen + } else { + PointerKind::Shared + }, + hir::MutMutable => { + if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || + ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { + PointerKind::UniqueBorrowed + } else { + PointerKind::Shared + } + } + }; + + result = Some(PointeeInfo { + size, + align, + safe: Some(kind) + }); + } + + _ => { + let mut data_variant = match self.variants { + layout::Variants::NicheFilling { dataful_variant, .. } => { + // Only the niche itself is always initialized, + // so only check for a pointer at its offset. + // + // If the niche is a pointer, it's either valid + // (according to its type), or null (which the + // niche field's scalar validity range encodes). + // This allows using `dereferenceable_or_null` + // for e.g. `Option<&T>`, and this will continue + // to work as long as we don't start using more + // niches than just null (e.g. the first page + // of the address space, or unaligned pointers). + if self.fields.offset(0) == offset { + Some(self.for_variant(ccx, dataful_variant)) + } else { + None + } + } + _ => Some(*self) + }; + + if let Some(variant) = data_variant { + // We're not interested in any unions. + if let layout::FieldPlacement::Union(_) = variant.fields { + data_variant = None; + } + } + + if let Some(variant) = data_variant { + let ptr_end = offset + layout::Pointer.size(ccx); + for i in 0..variant.fields.count() { + let field_start = variant.fields.offset(i); + if field_start <= offset { + let field = variant.field(ccx, i); + if ptr_end <= field_start + field.size { + // We found the right field, look inside it. + result = field.pointee_info_at(ccx, offset - field_start); + break; + } + } + } + } + + // FIXME(eddyb) This should be for `ptr::Unique`, not `Box`. 
+ if let Some(ref mut pointee) = result { + if let ty::TyAdt(def, _) = self.ty.sty { + if def.is_box() && offset.bytes() == 0 { + pointee.safe = Some(PointerKind::UniqueOwned); + } + } + } + } + } + + ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result); + result + } } diff --git a/src/librustc_trans_utils/monomorphize.rs b/src/librustc_trans_utils/monomorphize.rs index ab61dacf010ae1f51637f15453ebbf0a5b023095..eee5c1d9ef238c6a397590ed63390a1ea46a7642 100644 --- a/src/librustc_trans_utils/monomorphize.rs +++ b/src/librustc_trans_utils/monomorphize.rs @@ -12,7 +12,7 @@ use rustc::middle::lang_items::DropInPlaceFnLangItem; use rustc::traits; use rustc::ty::adjustment::CustomCoerceUnsized; -use rustc::ty::subst::{Kind, Subst, Substs}; +use rustc::ty::subst::{Kind, Subst}; use rustc::ty::{self, Ty, TyCtxt}; pub use rustc::ty::Instance; @@ -125,12 +125,3 @@ pub fn custom_coerce_unsize_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -/// Returns the normalized type of a struct field -pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_substs: &Substs<'tcx>, - f: &'tcx ty::FieldDef) - -> Ty<'tcx> -{ - tcx.fully_normalize_associated_types_in(&f.ty(tcx, param_substs)) -} - diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index 20ea8d70302205f1084b7745ef24475b86e8c2c4..9aa172591b86f7410d7c030d303c89ce13bdf891 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -178,6 +178,22 @@ extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned Index, #endif } +extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr, + unsigned Index, + uint32_t Bytes) { + CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B; + B.addAlignmentAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, B)); +#else + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, + AttributeSet::get(Call->getContext(), Index, B))); +#endif +} + extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr, unsigned Index, uint64_t Bytes) { @@ -194,6 +210,22 @@ extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr, #endif } +extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr, + unsigned Index, + uint64_t Bytes) { + CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B; + B.addDereferenceableOrNullAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, B)); +#else + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, + AttributeSet::get(Call->getContext(), Index, B))); +#endif +} + extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index, LLVMRustAttribute RustAttr) { Function *A = unwrap(Fn); @@ -206,6 +238,19 @@ extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index, #endif } +extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn, + unsigned Index, + uint32_t Bytes) { + Function *A = unwrap(Fn); + AttrBuilder B; + B.addAlignmentAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + A->addAttributes(Index, B); +#else + A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B)); +#endif +} + extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index, uint64_t Bytes) { Function *A = unwrap(Fn); @@ -218,6 +263,19 @@ extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index, #endif } +extern "C" 
void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn, + unsigned Index, + uint64_t Bytes) { + Function *A = unwrap(Fn); + AttrBuilder B; + B.addDereferenceableOrNullAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + A->addAttributes(Index, B); +#else + A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B)); +#endif +} + extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn, unsigned Index, const char *Name, @@ -257,21 +315,18 @@ extern "C" void LLVMRustSetHasUnsafeAlgebra(LLVMValueRef V) { extern "C" LLVMValueRef LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name, - LLVMAtomicOrdering Order, unsigned Alignment) { + LLVMAtomicOrdering Order) { LoadInst *LI = new LoadInst(unwrap(Source), 0); LI->setAtomic(fromRust(Order)); - LI->setAlignment(Alignment); return wrap(unwrap(B)->Insert(LI, Name)); } extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B, LLVMValueRef V, LLVMValueRef Target, - LLVMAtomicOrdering Order, - unsigned Alignment) { + LLVMAtomicOrdering Order) { StoreInst *SI = new StoreInst(unwrap(V), unwrap(Target)); SI->setAtomic(fromRust(Order)); - SI->setAlignment(Alignment); return wrap(unwrap(B)->Insert(SI)); } diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index 342a4f0d085c40c7bec87df2ce769ba709d050de..2b35d4547395a0d08b5ddab9aeb717ef511c0720 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -9,6 +9,7 @@ // except according to those terms. // compile-flags: -C no-prepopulate-passes +// ignore-tidy-linelength #![crate_type = "lib"] @@ -23,9 +24,9 @@ pub fn helper(_: usize) { pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot -// CHECK: %0 = insertvalue { i8*, [[USIZE]] } undef, i8* %x.ptr, 0 -// CHECK: %1 = insertvalue { i8*, [[USIZE]] } %0, [[USIZE]] %x.meta, 1 -// CHECK: ret { i8*, [[USIZE]] } %1 +// CHECK: %0 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.0, 0 +// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } %0, [[USIZE]] %x.1, 1 +// CHECK: ret { [0 x i8]*, [[USIZE]] } %1 { x } } diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs index 33b4221b73338b454f993ab62a4f1a3c8f2f23e7..a75b8f3992d0795ea76697d761dd7f596dfbf80e 100644 --- a/src/test/codegen/consts.rs +++ b/src/test/codegen/consts.rs @@ -54,7 +54,7 @@ pub fn inline_enum_const() -> E { #[no_mangle] pub fn low_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } @@ -62,6 +62,6 @@ pub fn inline_enum_const() -> E { #[no_mangle] pub fn high_align_const() -> E { // Check that low_align_const and high_align_const use the same constant -// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]] +// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]] *&E::A(0) } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 29e2840c8817e9011f8ccad2e832f0ab6a9737c4..f8945a6ee8d93aeb0eba51b16923d601f02b0f59 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -9,12 +9,13 @@ // except according to those terms. 
// compile-flags: -C no-prepopulate-passes +// ignore-tidy-linelength #![crate_type = "lib"] #![feature(custom_attribute)] pub struct S { - _field: [i64; 4], + _field: [i32; 8], } pub struct UnsafeInner { @@ -45,13 +46,13 @@ pub fn static_borrow(_: &'static i32) { pub fn named_borrow<'r>(_: &'r i32) { } -// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0) +// CHECK: @unsafe_borrow(i16* dereferenceable(2) %arg0) // unsafe interior means this isn't actually readonly and there may be aliases ... #[no_mangle] pub fn unsafe_borrow(_: &UnsafeInner) { } -// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0) +// CHECK: @mutable_unsafe_borrow(i16* dereferenceable(2) %arg0) // ... unless this is a mutable borrow, those never alias // ... except that there's this LLVM bug that forces us to not use noalias, see #29485 #[no_mangle] @@ -76,7 +77,7 @@ pub fn indirect_struct(_: S) { pub fn borrowed_struct(_: &S) { } -// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x) +// CHECK: noalias align 4 dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x) #[no_mangle] pub fn _box(x: Box) -> Box { x @@ -86,7 +87,7 @@ pub fn _box(x: Box) -> Box { #[no_mangle] pub fn struct_return() -> S { S { - _field: [0, 0, 0, 0] + _field: [0, 0, 0, 0, 0, 0, 0, 0] } } @@ -96,43 +97,43 @@ pub fn struct_return() -> S { pub fn helper(_: usize) { } -// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @slice([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn slice(_: &[u8]) { } -// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @mutable_slice([0 x i8]* nonnull %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` // ... there's this LLVM bug that forces us to not use noalias, see #29485 #[no_mangle] pub fn mutable_slice(_: &mut [u8]) { } -// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @unsafe_slice([0 x i16]* nonnull %arg0.0, [[USIZE]] %arg0.1) // unsafe interior means this isn't actually readonly and there may be aliases ... 
#[no_mangle] pub fn unsafe_slice(_: &[UnsafeInner]) { } -// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @str([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn str(_: &[u8]) { } -// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly) +// CHECK: @trait_borrow(%"core::ops::drop::Drop"* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn trait_borrow(_: &Drop) { } -// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly) +// CHECK: @trait_box(%"core::ops::drop::Drop"* noalias nonnull, {}* noalias nonnull readonly) #[no_mangle] pub fn trait_box(_: Box) { } -// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) +// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1) #[no_mangle] pub fn return_slice(x: &[u16]) -> &[u16] { x diff --git a/src/test/codegen/issue-32031.rs b/src/test/codegen/issue-32031.rs index 5d3ccbfa4ceb07f4556c0b498cad206d5b28da14..e5ec17385455e24d59089e7ea7a887e48d7ea329 100644 --- a/src/test/codegen/issue-32031.rs +++ b/src/test/codegen/issue-32031.rs @@ -15,7 +15,7 @@ #[no_mangle] pub struct F32(f32); -// CHECK: define float @add_newtype_f32(float, float) +// CHECK: define float @add_newtype_f32(float %a, float %b) #[inline(never)] #[no_mangle] pub fn add_newtype_f32(a: F32, b: F32) -> F32 { @@ -25,7 +25,7 @@ pub fn add_newtype_f32(a: F32, b: F32) -> F32 { #[no_mangle] pub struct F64(f64); -// CHECK: define double @add_newtype_f64(double, double) +// CHECK: define double @add_newtype_f64(double %a, double %b) #[inline(never)] #[no_mangle] pub fn add_newtype_f64(a: F64, b: F64) -> F64 { diff --git a/src/test/codegen/link_section.rs b/src/test/codegen/link_section.rs index 98214dc5c6f3d994afc190fbe7bac7076b63499f..1879002e7f3d7cb2750ca73b9aad7aedda4a8347 100644 --- a/src/test/codegen/link_section.rs +++ b/src/test/codegen/link_section.rs @@ -22,12 +22,12 @@ pub enum E { B(f32) } -// CHECK: @VAR2 = constant {{.*}} { i32 0, i32 666 }, section ".test_two" +// CHECK: @VAR2 = constant {{.*}}, section ".test_two" #[no_mangle] #[link_section = ".test_two"] pub static VAR2: E = E::A(666); -// CHECK: @VAR3 = constant {{.*}} { i32 1, float 1.000000e+00 }, section ".test_three" +// CHECK: @VAR3 = constant {{.*}}, section ".test_three" #[no_mangle] #[link_section = ".test_three"] pub static VAR3: E = E::B(1.); diff --git a/src/test/codegen/match-optimizes-away.rs b/src/test/codegen/match-optimizes-away.rs index c0f2f64f82c8d65dd5034b9717aa3afd9c34fb34..d7b779374314dc94576b914886e6983454be6f98 100644 --- a/src/test/codegen/match-optimizes-away.rs +++ b/src/test/codegen/match-optimizes-away.rs @@ -12,11 +12,9 @@ // compile-flags: -O #![crate_type="lib"] -pub enum Three { First, Second, Third } -use Three::*; +pub enum Three { A, B, C } -pub enum Four { First, Second, Third, Fourth } -use Four::*; +pub enum Four { A, B, C, D } #[no_mangle] pub fn three_valued(x: Three) -> Three { @@ -24,9 +22,9 @@ pub fn three_valued(x: Three) -> Three { // CHECK-NEXT: {{^.*:$}} // CHECK-NEXT: ret i8 %0 match x { - First => First, - Second => Second, - Third => Third, + Three::A => Three::A, + Three::B => Three::B, + Three::C => Three::C, } } @@ -36,9 +34,9 @@ pub fn four_valued(x: Four) -> Four { // CHECK-NEXT: {{^.*:$}} // CHECK-NEXT: ret i8 %0 match x { - First => First, - Second => 
Second, - Third => Third, - Fourth => Fourth, + Four::A => Four::A, + Four::B => Four::B, + Four::C => Four::C, + Four::D => Four::D, } } diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs index 99e6e38a3bf0bd97654846d3f5c6e151d1d2b822..dd530cf03cd41f3d0a20dd613d096827ab9564eb 100644 --- a/src/test/codegen/packed.rs +++ b/src/test/codegen/packed.rs @@ -54,9 +54,6 @@ pub fn call_pkd(f: fn() -> Array) -> BigPacked { // CHECK-LABEL: @pkd_pair #[no_mangle] pub fn pkd_pair(pair1: &mut PackedPair, pair2: &mut PackedPair) { - // CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* %{{.*}}, align 1 - // CHECK: [[V2:%[a-z0-9]+]] = load i32, i32* %{{.*}}, align 1 - // CHECK: store i8 [[V1]], i8* {{.*}}, align 1 - // CHECK: store i32 [[V2]], i32* {{.*}}, align 1 +// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 5, i32 1, i1 false) *pair2 = *pair1; } diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index 4b713e28b05253d2cc3527756ff3154761135c20..6c00ffa754b060f0c4357627f3e89a227c2d3794 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -9,6 +9,7 @@ // except according to those terms. // compile-flags: -C no-prepopulate-passes +// ignore-tidy-linelength #![crate_type = "lib"] @@ -23,10 +24,10 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0 -// CHECK: store i8* %s.ptr, i8** [[X0]] -// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1 -// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]] +// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0 +// CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]] +// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1 +// CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]] let x = &*s; &x; // keep variable in an alloca diff --git a/src/test/codegen/slice-init.rs b/src/test/codegen/slice-init.rs index 569d937c812cbe67f8056f32d1b64a7961be1d5b..915db493fc2a4f2068971228e38eecd05fa4a7c3 100644 --- a/src/test/codegen/slice-init.rs +++ b/src/test/codegen/slice-init.rs @@ -15,7 +15,7 @@ // CHECK-LABEL: @zero_sized_elem #[no_mangle] pub fn zero_sized_elem() { - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} // CHECK-NOT: call void @llvm.memset.p0i8 let x = [(); 4]; drop(&x); @@ -24,7 +24,7 @@ pub fn zero_sized_elem() { // CHECK-LABEL: @zero_len_array #[no_mangle] pub fn zero_len_array() { - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} // CHECK-NOT: call void @llvm.memset.p0i8 let x = [4; 0]; drop(&x); @@ -34,7 +34,7 @@ pub fn zero_len_array() { #[no_mangle] pub fn byte_array() { // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 7, i[[WIDTH]] 4 - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [7u8; 4]; drop(&x); } @@ -50,7 +50,7 @@ enum Init { #[no_mangle] pub fn byte_enum_array() { // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 {{.*}}, i[[WIDTH]] 4 - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [Init::Memset; 4]; drop(&x); } @@ -59,7 +59,7 @@ pub fn byte_enum_array() { #[no_mangle] pub fn zeroed_integer_array() { // CHECK: call void 
@llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 0, i[[WIDTH]] 16 - // CHECK-NOT: br label %slice_loop_header{{.*}} + // CHECK-NOT: br label %repeat_loop_header{{.*}} let x = [0u32; 4]; drop(&x); } @@ -67,7 +67,7 @@ pub fn zeroed_integer_array() { // CHECK-LABEL: @nonzero_integer_array #[no_mangle] pub fn nonzero_integer_array() { - // CHECK: br label %slice_loop_header{{.*}} + // CHECK: br label %repeat_loop_header{{.*}} // CHECK-NOT: call void @llvm.memset.p0i8 let x = [0x1a_2b_3c_4d_u32; 4]; drop(&x); diff --git a/src/test/ui/issue-26548.rs b/src/test/compile-fail/issue-26548.rs similarity index 70% rename from src/test/ui/issue-26548.rs rename to src/test/compile-fail/issue-26548.rs index 2591d7bcbaef4e2cc73e50bc17ff70ae97d4f000..39c6e97268f980e3d9aebdcd8a33ba64256be49e 100644 --- a/src/test/ui/issue-26548.rs +++ b/src/test/compile-fail/issue-26548.rs @@ -8,7 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// error-pattern: overflow representing the type +// error-pattern: unsupported cyclic reference between types/traits detected +// note-pattern: the cycle begins when computing layout of +// note-pattern: ...which then requires computing layout of +// note-pattern: ...which then again requires computing layout of trait Mirror { type It: ?Sized; } diff --git a/src/test/run-make/issue-25581/test.c b/src/test/run-make/issue-25581/test.c index ab85d2bb13fb16b9519475a1a8aadbf86f5161b0..5736b1730216d4bd87775c16036a7b687cc35076 100644 --- a/src/test/run-make/issue-25581/test.c +++ b/src/test/run-make/issue-25581/test.c @@ -2,10 +2,15 @@ #include #include -size_t slice_len(uint8_t *data, size_t len) { - return len; +struct ByteSlice { + uint8_t *data; + size_t len; +}; + +size_t slice_len(struct ByteSlice bs) { + return bs.len; } -uint8_t slice_elem(uint8_t *data, size_t len, size_t idx) { - return data[idx]; +uint8_t slice_elem(struct ByteSlice bs, size_t idx) { + return bs.data[idx]; } diff --git a/src/test/run-pass/enum-discrim-manual-sizing.rs b/src/test/run-pass/enum-discrim-manual-sizing.rs index 3bbc107e0b99e20bdbc227f0bbdad29aaecd4d95..8557c065dc69c0694c18f20b0b9abe67820bdcf8 100644 --- a/src/test/run-pass/enum-discrim-manual-sizing.rs +++ b/src/test/run-pass/enum-discrim-manual-sizing.rs @@ -108,6 +108,9 @@ pub fn main() { let array_expected_size = round_up(28, align_of::>()); assert_eq!(size_of::>(), array_expected_size); assert_eq!(size_of::>(), 32); + + assert_eq!(align_of::(), align_of::()); + assert_eq!(align_of::>(), align_of::()); } // Rounds x up to the next multiple of a diff --git a/src/test/run-pass/enum-univariant-repr.rs b/src/test/run-pass/enum-univariant-repr.rs index ef4cc60bf0da1fc62fa7ab98a7bab098e6798c2a..17d614b54969cd9953bd529406783e6663afccba 100644 --- a/src/test/run-pass/enum-univariant-repr.rs +++ b/src/test/run-pass/enum-univariant-repr.rs @@ -22,6 +22,11 @@ enum UnivariantWithoutDescr { Y } +#[repr(u8)] +enum UnivariantWithData { + Z(u8), +} + pub fn main() { { assert_eq!(4, mem::size_of::()); @@ -44,4 +49,12 @@ pub fn main() { // check it has the same memory layout as u16 assert_eq!(&[descr, descr, descr], ints); } + + { + assert_eq!(2, mem::size_of::()); + + match UnivariantWithData::Z(4) { + UnivariantWithData::Z(x) => assert_eq!(x, 4), + } + } } diff --git a/src/test/run-pass/packed-struct-optimized-enum.rs b/src/test/run-pass/packed-struct-optimized-enum.rs new file mode 100644 index 0000000000000000000000000000000000000000..1179f16daa238c699752302617b8b1905a518daf --- /dev/null +++ 
b/src/test/run-pass/packed-struct-optimized-enum.rs @@ -0,0 +1,25 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[repr(packed)] +#[derive(Copy, Clone)] +struct Packed(T); + +fn main() { + let one = (Some(Packed((&(), 0))), true); + let two = [one, one]; + let stride = (&two[1] as *const _ as usize) - (&two[0] as *const _ as usize); + + // This can fail if rustc and LLVM disagree on the size of a type. + // In this case, `Option>` was erronously not + // marked as packed despite needing alignment `1` and containing + // its `&()` discriminant, which has alignment larger than `1`. + assert_eq!(stride, std::mem::size_of_val(&one)); +} diff --git a/src/test/ui/issue-26548.stderr b/src/test/ui/issue-26548.stderr deleted file mode 100644 index 8bfe4ac733b6d91c97d783b55e6a20fa95af4f4f..0000000000000000000000000000000000000000 --- a/src/test/ui/issue-26548.stderr +++ /dev/null @@ -1,9 +0,0 @@ -error[E0391]: unsupported cyclic reference between types/traits detected - | -note: the cycle begins when computing layout of `S`... -note: ...which then requires computing layout of `std::option::Option<::It>`... -note: ...which then requires computing layout of `::It`... - = note: ...which then again requires computing layout of `S`, completing the cycle. - -error: aborting due to previous error - diff --git a/src/test/ui/print_type_sizes/nullable.rs b/src/test/ui/print_type_sizes/niche-filling.rs similarity index 76% rename from src/test/ui/print_type_sizes/nullable.rs rename to src/test/ui/print_type_sizes/niche-filling.rs index 5052c59a39dcfcfa2989e48f0b11fff0279bad88..f1c419d889556968346229911c65297faa26c34e 100644 --- a/src/test/ui/print_type_sizes/nullable.rs +++ b/src/test/ui/print_type_sizes/niche-filling.rs @@ -10,8 +10,8 @@ // compile-flags: -Z print-type-sizes -// This file illustrates how enums with a non-null field are handled, -// modelled after cases like `Option<&u32>` and such. +// This file illustrates how niche-filling enums are handled, +// modelled after cases like `Option<&u32>`, `Option` and such. // // It uses NonZero directly, rather than `&_` or `Unique<_>`, because // the test is not set up to deal with target-dependent pointer width. 
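// Context for the renamed niche-filling tests: when a payload type has
// forbidden bit patterns (null for `&T`, 2..=255 for `bool`, values above
// 0x10FFFF for `char`), the enum can encode its discriminant inside that
// "niche" instead of adding a separate tag byte. The assertions below are a
// minimal standalone sketch of that effect (not taken from this patch, but
// consistent with the print-type-sizes expectations that follow).
use std::mem::size_of;

fn main() {
    // `None` is represented by the null pointer, so no extra tag is needed.
    assert_eq!(size_of::<Option<&u32>>(), size_of::<&u32>());
    // `bool` only inhabits 0 and 1, so `Option<bool>` still fits in one byte.
    assert_eq!(size_of::<Option<bool>>(), 1);
    // `char` never exceeds 0x10FFFF, leaving spare values to encode `None`.
    assert_eq!(size_of::<Option<char>>(), 4);
}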
@@ -68,8 +68,22 @@ impl One for u32 { fn one() -> Self { 1 } } +pub enum Enum4 { + One(A), + Two(B), + Three(C), + Four(D) +} + pub fn main() { let _x: MyOption> = Default::default(); let _y: EmbeddedDiscr = Default::default(); let _z: MyOption> = Default::default(); + let _a: MyOption = Default::default(); + let _b: MyOption = Default::default(); + let _c: MyOption = Default::default(); + let _b: MyOption> = Default::default(); + let _e: Enum4<(), char, (), ()> = Enum4::One(()); + let _f: Enum4<(), (), bool, ()> = Enum4::One(()); + let _g: Enum4<(), (), (), MyOption> = Enum4::One(()); } diff --git a/src/test/ui/print_type_sizes/niche-filling.stdout b/src/test/ui/print_type_sizes/niche-filling.stdout new file mode 100644 index 0000000000000000000000000000000000000000..af3e89a936ee05cc85b4a41823fa8b0668e286cf --- /dev/null +++ b/src/test/ui/print_type_sizes/niche-filling.stdout @@ -0,0 +1,80 @@ +print-type-size type: `IndirectNonZero`: 12 bytes, alignment: 4 bytes +print-type-size field `.nested`: 8 bytes +print-type-size field `.post`: 2 bytes +print-type-size field `.pre`: 1 bytes +print-type-size end padding: 1 bytes +print-type-size type: `MyOption>`: 12 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 12 bytes +print-type-size field `.0`: 12 bytes +print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Record`: 7 bytes +print-type-size field `.val`: 4 bytes +print-type-size field `.post`: 2 bytes +print-type-size field `.pre`: 1 bytes +print-type-size end padding: 1 bytes +print-type-size type: `NestedNonZero`: 8 bytes, alignment: 4 bytes +print-type-size field `.val`: 4 bytes +print-type-size field `.post`: 2 bytes +print-type-size field `.pre`: 1 bytes +print-type-size end padding: 1 bytes +print-type-size type: `Enum4<(), char, (), ()>`: 4 bytes, alignment: 4 bytes +print-type-size variant `One`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Two`: 4 bytes +print-type-size field `.0`: 4 bytes +print-type-size variant `Three`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Four`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size type: `MyOption`: 4 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 4 bytes +print-type-size field `.0`: 4 bytes +print-type-size type: `MyOption>`: 4 bytes, alignment: 4 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 4 bytes +print-type-size field `.0`: 4 bytes +print-type-size type: `core::nonzero::NonZero`: 4 bytes, alignment: 4 bytes +print-type-size field `.0`: 4 bytes +print-type-size type: `Enum4<(), (), (), MyOption>`: 2 bytes, alignment: 1 bytes +print-type-size variant `One`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Two`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Three`: 0 bytes +print-type-size field `.0`: 0 bytes +print-type-size variant `Four`: 2 bytes +print-type-size field `.0`: 2 bytes +print-type-size type: `MyOption>`: 2 bytes, alignment: 1 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 2 bytes +print-type-size field `.0`: 2 bytes +print-type-size type: `MyOption`: 2 bytes, alignment: 1 bytes +print-type-size discriminant: 1 bytes +print-type-size variant `None`: 0 bytes +print-type-size variant `Some`: 1 bytes +print-type-size field `.0`: 1 bytes +print-type-size type: `Enum4<(), 
diff --git a/src/test/ui/print_type_sizes/niche-filling.stdout b/src/test/ui/print_type_sizes/niche-filling.stdout
new file mode 100644
index 0000000000000000000000000000000000000000..af3e89a936ee05cc85b4a41823fa8b0668e286cf
--- /dev/null
+++ b/src/test/ui/print_type_sizes/niche-filling.stdout
@@ -0,0 +1,80 @@
+print-type-size type: `IndirectNonZero<u32>`: 12 bytes, alignment: 4 bytes
+print-type-size field `.nested`: 8 bytes
+print-type-size field `.post`: 2 bytes
+print-type-size field `.pre`: 1 bytes
+print-type-size end padding: 1 bytes
+print-type-size type: `MyOption<IndirectNonZero<u32>>`: 12 bytes, alignment: 4 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Some`: 12 bytes
+print-type-size field `.0`: 12 bytes
+print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Record`: 7 bytes
+print-type-size field `.val`: 4 bytes
+print-type-size field `.post`: 2 bytes
+print-type-size field `.pre`: 1 bytes
+print-type-size end padding: 1 bytes
+print-type-size type: `NestedNonZero<u32>`: 8 bytes, alignment: 4 bytes
+print-type-size field `.val`: 4 bytes
+print-type-size field `.post`: 2 bytes
+print-type-size field `.pre`: 1 bytes
+print-type-size end padding: 1 bytes
+print-type-size type: `Enum4<(), char, (), ()>`: 4 bytes, alignment: 4 bytes
+print-type-size variant `One`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size variant `Two`: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size variant `Three`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size variant `Four`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size type: `MyOption<char>`: 4 bytes, alignment: 4 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Some`: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size type: `MyOption<core::nonzero::NonZero<u32>>`: 4 bytes, alignment: 4 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Some`: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size type: `core::nonzero::NonZero<u32>`: 4 bytes, alignment: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size type: `Enum4<(), (), (), MyOption<u8>>`: 2 bytes, alignment: 1 bytes
+print-type-size variant `One`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size variant `Two`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size variant `Three`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size variant `Four`: 2 bytes
+print-type-size field `.0`: 2 bytes
+print-type-size type: `MyOption<MyOption<u8>>`: 2 bytes, alignment: 1 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Some`: 2 bytes
+print-type-size field `.0`: 2 bytes
+print-type-size type: `MyOption<u8>`: 2 bytes, alignment: 1 bytes
+print-type-size discriminant: 1 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Some`: 1 bytes
+print-type-size field `.0`: 1 bytes
+print-type-size type: `Enum4<(), (), bool, ()>`: 1 bytes, alignment: 1 bytes
+print-type-size variant `One`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size variant `Two`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size variant `Three`: 1 bytes
+print-type-size field `.0`: 1 bytes
+print-type-size variant `Four`: 0 bytes
+print-type-size field `.0`: 0 bytes
+print-type-size type: `MyOption<bool>`: 1 bytes, alignment: 1 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Some`: 1 bytes
+print-type-size field `.0`: 1 bytes
+print-type-size type: `MyOption<std::cmp::Ordering>`: 1 bytes, alignment: 1 bytes
+print-type-size variant `None`: 0 bytes
+print-type-size variant `Some`: 1 bytes
+print-type-size field `.0`: 1 bytes
+print-type-size type: `core::cmp::Ordering`: 1 bytes, alignment: 1 bytes
+print-type-size discriminant: 1 bytes
+print-type-size variant `Less`: 0 bytes
+print-type-size variant `Equal`: 0 bytes
+print-type-size variant `Greater`: 0 bytes
diff --git a/src/test/ui/print_type_sizes/nullable.stdout b/src/test/ui/print_type_sizes/nullable.stdout
deleted file mode 100644
index 830678f174f88cf38e0d76836bac2cc7c9a9ee96..0000000000000000000000000000000000000000
--- a/src/test/ui/print_type_sizes/nullable.stdout
+++ /dev/null
@@ -1,24 +0,0 @@
-print-type-size type: `IndirectNonZero<u32>`: 12 bytes, alignment: 4 bytes
-print-type-size field `.nested`: 8 bytes
-print-type-size field `.post`: 2 bytes
-print-type-size field `.pre`: 1 bytes
-print-type-size end padding: 1 bytes
-print-type-size type: `MyOption<IndirectNonZero<u32>>`: 12 bytes, alignment: 4 bytes
-print-type-size variant `Some`: 12 bytes
-print-type-size field `.0`: 12 bytes
-print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes
-print-type-size variant `Record`: 7 bytes
-print-type-size field `.val`: 4 bytes
-print-type-size field `.post`: 2 bytes
-print-type-size field `.pre`: 1 bytes
-print-type-size end padding: 1 bytes
-print-type-size type: `NestedNonZero<u32>`: 8 bytes, alignment: 4 bytes
-print-type-size field `.val`: 4 bytes
-print-type-size field `.post`: 2 bytes
-print-type-size field `.pre`: 1 bytes
-print-type-size end padding: 1 bytes
-print-type-size type: `MyOption<core::nonzero::NonZero<u32>>`: 4 bytes, alignment: 4 bytes
-print-type-size variant `Some`: 4 bytes
-print-type-size field `.0`: 4 bytes
-print-type-size type: `core::nonzero::NonZero<u32>`: 4 bytes, alignment: 4 bytes
-print-type-size field `.0`: 4 bytes
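The `Enum4` entries in the expected output above show that the optimization is not limited to two-variant, `Option`-like enums: a single variant's payload can donate its niche, provided the niche has enough spare values to encode every dataless variant. A small sketch of that case follows (the asserted sizes mirror the expected output and current rustc behaviour; they are not documented guarantees):

```rust
use std::mem::size_of;

// Same shape as the test's Enum4: four variants, of which only one carries
// data in each of the instantiations below.
#[allow(dead_code)]
enum Enum4<A, B, C, D> {
    One(A),
    Two(B),
    Three(C),
    Four(D),
}

fn main() {
    // char has far more invalid values than needed to encode the three
    // dataless variants, so no separate tag is added: 4 bytes total.
    assert_eq!(size_of::<Enum4<(), char, (), ()>>(), 4);

    // bool only uses the byte values 0 and 1; the remaining 254 values are
    // enough for the other three variants, so the whole enum fits in 1 byte.
    assert_eq!(size_of::<Enum4<(), (), bool, ()>>(), 1);
}
```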
diff --git a/src/test/run-pass/issue-30276.rs b/src/test/ui/print_type_sizes/uninhabited.rs
similarity index 65%
rename from src/test/run-pass/issue-30276.rs
rename to src/test/ui/print_type_sizes/uninhabited.rs
index 5dd0cd8ba53138b43d5f5db9bf48820067c8c428..69cc4c933601e0eb685346f9d29932e6e0179c01 100644
--- a/src/test/run-pass/issue-30276.rs
+++ b/src/test/ui/print_type_sizes/uninhabited.rs
@@ -1,4 +1,4 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
 // file at the top-level directory of this distribution and at
 // http://rust-lang.org/COPYRIGHT.
 //
@@ -8,7 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-struct Test([i32]);
-fn main() {
-    let _x: fn(_) -> Test = Test;
+// compile-flags: -Z print-type-sizes
+
+#![feature(never_type)]
+
+pub fn main() {
+    let _x: Option<!> = None;
+    let _y: Result<u32, !> = Ok(42);
 }
diff --git a/src/test/ui/print_type_sizes/uninhabited.stdout b/src/test/ui/print_type_sizes/uninhabited.stdout
new file mode 100644
index 0000000000000000000000000000000000000000..2a8706f7ac5514591adba5f5d323cb0b356b1116
--- /dev/null
+++ b/src/test/ui/print_type_sizes/uninhabited.stdout
@@ -0,0 +1,5 @@
+print-type-size type: `std::result::Result<u32, !>`: 4 bytes, alignment: 4 bytes
+print-type-size variant `Ok`: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size type: `std::option::Option<!>`: 0 bytes, alignment: 1 bytes
+print-type-size variant `None`: 0 bytes
diff --git a/src/tools/cargotest/main.rs b/src/tools/cargotest/main.rs
index a6c56a13076297824a26afe127dd1cbffafbbc74..b1122f401feb9b8c30fac7df90c8d1feeea2c5ff 100644
--- a/src/tools/cargotest/main.rs
+++ b/src/tools/cargotest/main.rs
@@ -60,8 +60,8 @@ struct Test {
     },
     Test {
         name: "servo",
-        repo: "https://github.com/servo/servo",
-        sha: "38fe9533b93e985657f99a29772bf3d3c8694822",
+        repo: "https://github.com/eddyb/servo",
+        sha: "6031de9a397e2feba4ff98725991825f62b68518",
         lock: None,
         // Only test Stylo a.k.a. Quantum CSS, the parts of Servo going into Firefox.
         // This takes much less time to build than all of Servo and supports stable Rust.
diff --git a/src/tools/toolstate.toml b/src/tools/toolstate.toml
index 744a0f96ad7347c5f5e24be1d85e713c56a279d1..f1684f4c5acbe42f5f8eb94ba87a3408b9bd5bc9 100644
--- a/src/tools/toolstate.toml
+++ b/src/tools/toolstate.toml
@@ -26,7 +26,7 @@ miri = "Broken"
 
 # ping @Manishearth @llogiq @mcarton @oli-obk
-clippy = "Testing"
+clippy = "Broken"
 
 # ping @nrc
 rls = "Testing"
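The `uninhabited` test depends on the unstable `never_type` feature; the same layout effect is visible on stable Rust through `std::convert::Infallible`, which is likewise uninhabited. A minimal sketch (the sizes match the expected stdout above and current rustc behaviour; they are not a stability guarantee):

```rust
use std::convert::Infallible;
use std::mem::size_of;

fn main() {
    // Infallible has no values, so the Err variant can never exist and
    // Result<u32, Infallible> needs no discriminant: it is just the u32.
    assert_eq!(size_of::<Result<u32, Infallible>>(), 4);

    // Option<Infallible> has exactly one possible value (None), so it is
    // zero-sized.
    assert_eq!(size_of::<Option<Infallible>>(), 0);
}
```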