Commit b723af28 authored by Eduard-Mihai Burtescu

rustc_trans: go through layouts uniformly for fat pointers and variants.

Parent 026214c8
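The diff below replaces a collection of ad-hoc helpers (`fat_ptr_base_ty`, `unsized_info_ty`, `Type::vtable_ptr`, `type_is_immediate`, the hand-built `str_slice` struct) with uniform queries on `TyLayout`: fat-pointer components are fetched with `layout.field(ccx, abi::FAT_PTR_ADDR / FAT_PTR_EXTRA)` and enum variants with `layout.for_variant(index)`. The sketch below is not part of the commit; it is a self-contained toy model (all type and helper names in it are invented for illustration) of that idea: callers index into one layout abstraction instead of special-casing fat pointers and variants. In the real commit the same role is played by `TyLayout::field` and `TyLayout::for_variant` in `rustc::ty::layout`.

```rust
// Toy model only (not rustc code): illustrates "going through layouts
// uniformly" for fat pointers and enum variants.

/// Field indices used by rustc_trans for fat pointers (from the `abi` module).
const FAT_PTR_ADDR: usize = 0;  // the thin data pointer
const FAT_PTR_EXTRA: usize = 1; // the metadata (length or vtable pointer)

/// Drastically simplified stand-in for `TyLayout`.
#[derive(Debug)]
enum ToyLayout {
    Scalar(&'static str),
    /// A fat pointer is just an aggregate with two fields.
    Aggregate { fields: Vec<ToyLayout> },
    /// An enum exposes each variant as its own layout.
    Enum { variants: Vec<ToyLayout> },
}

impl ToyLayout {
    /// Uniform field access: works for fat pointers and ordinary structs alike.
    fn field(&self, i: usize) -> &ToyLayout {
        match self {
            ToyLayout::Aggregate { fields } => &fields[i],
            other => panic!("no field {} on {:?}", i, other),
        }
    }

    /// Uniform variant access, mirroring `TyLayout::for_variant`.
    fn for_variant(&self, i: usize) -> &ToyLayout {
        match self {
            ToyLayout::Enum { variants } => &variants[i],
            other => other, // univariant types are their own "variant"
        }
    }
}

fn main() {
    // `&dyn Trait` modeled as { data pointer, vtable pointer }.
    let fat_ptr = ToyLayout::Aggregate {
        fields: vec![
            ToyLayout::Scalar("*mut T (data pointer)"),
            ToyLayout::Scalar("*mut vtable / usize (metadata)"),
        ],
    };
    // Instead of bespoke `fat_ptr_base_ty`/`vtable_ptr` helpers, ask the layout:
    println!("FAT_PTR_ADDR  -> {:?}", fat_ptr.field(FAT_PTR_ADDR));
    println!("FAT_PTR_EXTRA -> {:?}", fat_ptr.field(FAT_PTR_EXTRA));

    // Instead of casting to a hand-built variant struct, ask for the variant layout:
    let option_like = ToyLayout::Enum {
        variants: vec![
            ToyLayout::Aggregate { fields: vec![] },
            ToyLayout::Aggregate { fields: vec![ToyLayout::Scalar("i32")] },
        ],
    };
    println!("variant 1 -> {:?}", option_like.for_variant(1));
}
```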
@@ -35,7 +35,7 @@
 use session::{config, early_error, Session};
 use traits::Reveal;
 use ty::{self, TyCtxt, Ty};
-use ty::layout::{FullLayout, LayoutError, LayoutOf};
+use ty::layout::{LayoutError, LayoutOf, TyLayout};
 use util::nodemap::FxHashMap;
 use std::default::Default as StdDefault;
@@ -628,9 +628,9 @@ fn with_param_env<F>(&mut self, id: ast::NodeId, f: F)
 }
 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a LateContext<'a, 'tcx> {
-type FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>;
+type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
-fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
+fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
 (self.tcx, self.param_env.reveal_all()).layout_of(ty)
 }
 }
......
This diff is collapsed.
@@ -753,7 +753,7 @@ fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
 bug!("failed to get layout for `{}`: {}", t, e)
 });
-if let Layout::General { ref variants, discr, .. } = *layout.layout {
+if let Layout::General { ref variants, discr, .. } = layout.layout {
 let discr_size = discr.size(cx.tcx).bytes();
 debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
......
@@ -1316,11 +1316,6 @@ pub fn LLVMStructSetBody(StructTy: TypeRef,
 ElementCount: c_uint,
 Packed: Bool);
-pub fn LLVMConstNamedStruct(S: TypeRef,
-                            ConstantVals: *const ValueRef,
-                            Count: c_uint)
-                            -> ValueRef;
 /// Enables LLVM debug output.
 pub fn LLVMRustSetDebug(Enabled: c_int);
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
use rustc::hir; use rustc::hir;
use rustc::ty::{self, Ty}; use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, Size, FullLayout}; use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::ty::layout::{HasDataLayout, LayoutOf}; use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc_back::PanicStrategy; use rustc_back::PanicStrategy;
...@@ -275,7 +275,7 @@ pub trait LayoutExt<'tcx> { ...@@ -275,7 +275,7 @@ pub trait LayoutExt<'tcx> {
fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>; fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
} }
impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> { impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
fn is_aggregate(&self) -> bool { fn is_aggregate(&self) -> bool {
match self.abi { match self.abi {
layout::Abi::Scalar(_) | layout::Abi::Scalar(_) |
...@@ -311,7 +311,7 @@ fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> ...@@ -311,7 +311,7 @@ fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>
let mut total = Size::from_bytes(0); let mut total = Size::from_bytes(0);
let mut result = None; let mut result = None;
let is_union = match *self.fields { let is_union = match self.fields {
layout::FieldPlacement::Array { count, .. } => { layout::FieldPlacement::Array { count, .. } => {
if count > 0 { if count > 0 {
return self.field(ccx, 0).homogeneous_aggregate(ccx); return self.field(ccx, 0).homogeneous_aggregate(ccx);
...@@ -424,7 +424,7 @@ pub fn llvm_type(&self, ccx: &CrateContext) -> Type { ...@@ -424,7 +424,7 @@ pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
#[derive(Debug)] #[derive(Debug)]
pub struct ArgType<'tcx> { pub struct ArgType<'tcx> {
kind: ArgKind, kind: ArgKind,
pub layout: FullLayout<'tcx>, pub layout: TyLayout<'tcx>,
/// Cast target, either a single uniform or a pair of registers. /// Cast target, either a single uniform or a pair of registers.
pub cast: Option<CastTarget>, pub cast: Option<CastTarget>,
/// Dummy argument, which is emitted before the real argument. /// Dummy argument, which is emitted before the real argument.
...@@ -435,7 +435,7 @@ pub struct ArgType<'tcx> { ...@@ -435,7 +435,7 @@ pub struct ArgType<'tcx> {
} }
impl<'a, 'tcx> ArgType<'tcx> { impl<'a, 'tcx> ArgType<'tcx> {
fn new(layout: FullLayout<'tcx>) -> ArgType<'tcx> { fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
ArgType { ArgType {
kind: ArgKind::Direct, kind: ArgKind::Direct,
layout, layout,
...@@ -610,7 +610,7 @@ pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>) ...@@ -610,7 +610,7 @@ pub fn of_instance(ccx: &CrateContext<'a, 'tcx>, instance: &ty::Instance<'tcx>)
let fn_ty = instance_ty(ccx.tcx(), &instance); let fn_ty = instance_ty(ccx.tcx(), &instance);
let sig = ty_fn_sig(ccx, fn_ty); let sig = ty_fn_sig(ccx, fn_ty);
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
Self::new(ccx, sig, &[]) FnType::new(ccx, sig, &[])
} }
pub fn new(ccx: &CrateContext<'a, 'tcx>, pub fn new(ccx: &CrateContext<'a, 'tcx>,
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
use super::ModuleTranslation; use super::ModuleTranslation;
use super::ModuleKind; use super::ModuleKind;
use abi;
use assert_module_sources; use assert_module_sources;
use back::link; use back::link;
use back::symbol_export; use back::symbol_export;
...@@ -40,7 +41,7 @@ ...@@ -40,7 +41,7 @@
use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::trans::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes};
use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, FullLayout, LayoutOf}; use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
use rustc::ty::maps::Providers; use rustc::ty::maps::Providers;
use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; use rustc::dep_graph::{DepNode, DepKind, DepConstructor};
use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
...@@ -68,7 +69,7 @@ ...@@ -68,7 +69,7 @@
use time_graph; use time_graph;
use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames}; use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames};
use type_::Type; use type_::Type;
use type_of::{self, LayoutLlvmExt}; use type_of::LayoutLlvmExt;
use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet}; use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet};
use CrateInfo; use CrateInfo;
...@@ -203,8 +204,10 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>, ...@@ -203,8 +204,10 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
old_info.expect("unsized_info: missing old info for trait upcast") old_info.expect("unsized_info: missing old info for trait upcast")
} }
(_, &ty::TyDynamic(ref data, ..)) => { (_, &ty::TyDynamic(ref data, ..)) => {
let vtable_ptr = ccx.layout_of(ccx.tcx().mk_mut_ptr(target))
.field(ccx, abi::FAT_PTR_EXTRA);
consts::ptrcast(meth::get_vtable(ccx, source, data.principal()), consts::ptrcast(meth::get_vtable(ccx, source, data.principal()),
Type::vtable_ptr(ccx)) vtable_ptr.llvm_type(ccx))
} }
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
source, source,
...@@ -255,8 +258,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ...@@ -255,8 +258,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure // So we need to pointercast the base to ensure
// the types match up. // the types match up.
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty); let thin_ptr = dst.layout.field(bcx.ccx, abi::FAT_PTR_ADDR);
(bcx.pointercast(base, llcast_ty), info) (bcx.pointercast(base, thin_ptr.llvm_type(bcx.ccx)), info)
} }
OperandValue::Immediate(base) => { OperandValue::Immediate(base) => {
unsize_thin_ptr(bcx, base, src_ty, dst_ty) unsize_thin_ptr(bcx, base, src_ty, dst_ty)
...@@ -371,7 +374,7 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef { ...@@ -371,7 +374,7 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
} }
} }
pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::FullLayout) -> ValueRef { pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi { if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi {
bcx.trunc(val, Type::i1(bcx.ccx)) bcx.trunc(val, Type::i1(bcx.ccx))
} else { } else {
...@@ -400,7 +403,7 @@ pub fn memcpy_ty<'a, 'tcx>( ...@@ -400,7 +403,7 @@ pub fn memcpy_ty<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>, bcx: &Builder<'a, 'tcx>,
dst: ValueRef, dst: ValueRef,
src: ValueRef, src: ValueRef,
layout: FullLayout<'tcx>, layout: TyLayout<'tcx>,
align: Option<Align>, align: Option<Align>,
) { ) {
let ccx = bcx.ccx; let ccx = bcx.ccx;
......
@@ -14,7 +14,7 @@
 use abi::{FnType, ArgType, LayoutExt, Reg};
 use context::CrateContext;
-use rustc::ty::layout::{self, FullLayout};
+use rustc::ty::layout::{self, TyLayout};
 fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
 if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 {
@@ -25,7 +25,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
 }
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-layout: FullLayout<'tcx>) -> bool {
+layout: TyLayout<'tcx>) -> bool {
 match layout.abi {
 layout::Abi::Scalar(layout::F32) |
 layout::Abi::Scalar(layout::F64) => true,
......
@@ -11,7 +11,7 @@
 use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind};
 use common::CrateContext;
-use rustc::ty::layout::{self, FullLayout};
+use rustc::ty::layout::{self, TyLayout};
 #[derive(PartialEq)]
 pub enum Flavor {
@@ -20,7 +20,7 @@ pub enum Flavor {
 }
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-layout: FullLayout<'tcx>) -> bool {
+layout: TyLayout<'tcx>) -> bool {
 match layout.abi {
 layout::Abi::Scalar(layout::F32) |
 layout::Abi::Scalar(layout::F64) => true,
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind}; use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind};
use context::CrateContext; use context::CrateContext;
use rustc::ty::layout::{self, Layout, FullLayout, Size}; use rustc::ty::layout::{self, Layout, TyLayout, Size};
#[derive(Clone, Copy, PartialEq, Debug)] #[derive(Clone, Copy, PartialEq, Debug)]
enum Class { enum Class {
...@@ -53,7 +53,7 @@ fn unify(cls: &mut [Class], ...@@ -53,7 +53,7 @@ fn unify(cls: &mut [Class],
} }
fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>, layout: TyLayout<'tcx>,
cls: &mut [Class], cls: &mut [Class],
off: Size) off: Size)
-> Result<(), Memory> { -> Result<(), Memory> {
...@@ -90,7 +90,7 @@ fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ...@@ -90,7 +90,7 @@ fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// FIXME(eddyb) have to work around Rust enums for now. // FIXME(eddyb) have to work around Rust enums for now.
// Fix is either guarantee no data where there is no field, // Fix is either guarantee no data where there is no field,
// by putting variants in fields, or be more clever. // by putting variants in fields, or be more clever.
match *layout.layout { match layout.layout {
Layout::General { .. } | Layout::General { .. } |
Layout::NullablePointer { .. } => return Err(Memory), Layout::NullablePointer { .. } => return Err(Memory),
_ => {} _ => {}
......
...@@ -54,20 +54,11 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> ...@@ -54,20 +54,11 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ->
} }
} }
pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
let layout = ccx.layout_of(ty);
match layout.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true,
layout::Abi::Aggregate { .. } => layout.is_zst()
}
}
/// Returns true if the type is represented as a pair of immediates. /// Returns true if the type is represented as a pair of immediates.
pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
-> bool { -> bool {
let layout = ccx.layout_of(ty); let layout = ccx.layout_of(ty);
match *layout.fields { match layout.fields {
layout::FieldPlacement::Arbitrary { .. } => { layout::FieldPlacement::Arbitrary { .. } => {
// There must be only 2 fields. // There must be only 2 fields.
if layout.fields.count() != 2 { if layout.fields.count() != 2 {
...@@ -75,8 +66,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ...@@ -75,8 +66,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
} }
// The two fields must be both immediates. // The two fields must be both immediates.
type_is_immediate(ccx, layout.field(ccx, 0).ty) && layout.field(ccx, 0).is_llvm_immediate() &&
type_is_immediate(ccx, layout.field(ccx, 1).ty) layout.field(ccx, 1).is_llvm_immediate()
} }
_ => false _ => false
} }
...@@ -256,16 +247,7 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { ...@@ -256,16 +247,7 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
let len = s.len(); let len = s.len();
let cs = consts::ptrcast(C_cstr(cx, s, false), let cs = consts::ptrcast(C_cstr(cx, s, false),
cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to()); cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to());
let empty = C_array(Type::i8(cx), &[]); C_fat_ptr(cx, cs, C_usize(cx, len as u64))
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
C_named_struct(cx.str_slice_type(), &[
empty,
cs,
empty,
C_usize(cx, len as u64),
empty
])
} }
pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef { pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
...@@ -293,12 +275,6 @@ pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) -> ...@@ -293,12 +275,6 @@ pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) ->
} }
} }
pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
unsafe {
llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
}
}
pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef { pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
unsafe { unsafe {
return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint); return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);
......
...@@ -24,14 +24,13 @@ ...@@ -24,14 +24,13 @@
use partitioning::CodegenUnit; use partitioning::CodegenUnit;
use type_::Type; use type_::Type;
use type_of::LayoutLlvmExt;
use rustc_data_structures::base_n; use rustc_data_structures::base_n;
use rustc::middle::trans::Stats; use rustc::middle::trans::Stats;
use rustc_data_structures::stable_hasher::StableHashingContextProvider; use rustc_data_structures::stable_hasher::StableHashingContextProvider;
use rustc::session::config::{self, NoDebugInfo}; use rustc::session::config::{self, NoDebugInfo};
use rustc::session::Session; use rustc::session::Session;
use rustc::ty::layout::{LayoutError, LayoutOf, FullLayout}; use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout};
use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap; use rustc::util::nodemap::FxHashMap;
use rustc_trans_utils; use rustc_trans_utils;
...@@ -101,9 +100,9 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { ...@@ -101,9 +100,9 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> {
/// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
used_statics: RefCell<Vec<ValueRef>>, used_statics: RefCell<Vec<ValueRef>>,
lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>, lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
isize_ty: Type, isize_ty: Type,
str_slice_type: Type,
dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>, dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
...@@ -378,8 +377,8 @@ pub fn new(shared: &SharedCrateContext<'a, 'tcx>, ...@@ -378,8 +377,8 @@ pub fn new(shared: &SharedCrateContext<'a, 'tcx>,
statics_to_rauw: RefCell::new(Vec::new()), statics_to_rauw: RefCell::new(Vec::new()),
used_statics: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()),
lltypes: RefCell::new(FxHashMap()), lltypes: RefCell::new(FxHashMap()),
scalar_lltypes: RefCell::new(FxHashMap()),
isize_ty: Type::from_ref(ptr::null_mut()), isize_ty: Type::from_ref(ptr::null_mut()),
str_slice_type: Type::from_ref(ptr::null_mut()),
dbg_cx, dbg_cx,
eh_personality: Cell::new(None), eh_personality: Cell::new(None),
eh_unwind_resume: Cell::new(None), eh_unwind_resume: Cell::new(None),
...@@ -389,28 +388,19 @@ pub fn new(shared: &SharedCrateContext<'a, 'tcx>, ...@@ -389,28 +388,19 @@ pub fn new(shared: &SharedCrateContext<'a, 'tcx>,
placeholder: PhantomData, placeholder: PhantomData,
}; };
let (isize_ty, str_slice_ty, mut local_ccx) = { let (isize_ty, mut local_ccx) = {
// Do a little dance to create a dummy CrateContext, so we can // Do a little dance to create a dummy CrateContext, so we can
// create some things in the LLVM module of this codegen unit // create some things in the LLVM module of this codegen unit
let mut local_ccxs = vec![local_ccx]; let mut local_ccxs = vec![local_ccx];
let (isize_ty, str_slice_ty) = { let isize_ty = {
let dummy_ccx = LocalCrateContext::dummy_ccx(shared, let dummy_ccx = LocalCrateContext::dummy_ccx(shared,
local_ccxs.as_mut_slice()); local_ccxs.as_mut_slice());
let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice"); Type::isize(&dummy_ccx)
str_slice_ty.set_struct_body(&[
Type::array(&Type::i8(&dummy_ccx), 0),
dummy_ccx.layout_of(shared.tcx.mk_str()).llvm_type(&dummy_ccx).ptr_to(),
Type::array(&Type::i8(&dummy_ccx), 0),
Type::isize(&dummy_ccx),
Type::array(&Type::i8(&dummy_ccx), 0)
], false);
(Type::isize(&dummy_ccx), str_slice_ty)
}; };
(isize_ty, str_slice_ty, local_ccxs.pop().unwrap()) (isize_ty, local_ccxs.pop().unwrap())
}; };
local_ccx.isize_ty = isize_ty; local_ccx.isize_ty = isize_ty;
local_ccx.str_slice_type = str_slice_ty;
local_ccx local_ccx
} }
...@@ -515,10 +505,14 @@ pub fn used_statics<'a>(&'a self) -> &'a RefCell<Vec<ValueRef>> { ...@@ -515,10 +505,14 @@ pub fn used_statics<'a>(&'a self) -> &'a RefCell<Vec<ValueRef>> {
&self.local().used_statics &self.local().used_statics
} }
pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> { pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>> {
&self.local().lltypes &self.local().lltypes
} }
pub fn scalar_lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
&self.local().scalar_lltypes
}
pub fn stats<'a>(&'a self) -> &'a RefCell<Stats> { pub fn stats<'a>(&'a self) -> &'a RefCell<Stats> {
&self.local().stats &self.local().stats
} }
...@@ -527,10 +521,6 @@ pub fn isize_ty(&self) -> Type { ...@@ -527,10 +521,6 @@ pub fn isize_ty(&self) -> Type {
self.local().isize_ty self.local().isize_ty
} }
pub fn str_slice_type(&self) -> Type {
self.local().str_slice_type
}
pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> { pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
&self.local().dbg_cx &self.local().dbg_cx
} }
...@@ -669,9 +659,9 @@ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { ...@@ -669,9 +659,9 @@ fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
} }
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> { impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> {
type FullLayout = FullLayout<'tcx>; type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
(self.tcx, ty::ParamEnv::empty(traits::Reveal::All)) (self.tcx, ty::ParamEnv::empty(traits::Reveal::All))
.layout_of(ty) .layout_of(ty)
.unwrap_or_else(|e| match e { .unwrap_or_else(|e| match e {
...@@ -682,10 +672,10 @@ fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { ...@@ -682,10 +672,10 @@ fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
} }
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CrateContext<'a, 'tcx> { impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CrateContext<'a, 'tcx> {
type FullLayout = FullLayout<'tcx>; type TyLayout = TyLayout<'tcx>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout { fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
self.shared.layout_of(ty) self.shared.layout_of(ty)
} }
} }
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
use rustc::ich::Fingerprint; use rustc::ich::Fingerprint;
use common::{self, CrateContext}; use common::{self, CrateContext};
use rustc::ty::{self, AdtKind, Ty}; use rustc::ty::{self, AdtKind, Ty};
use rustc::ty::layout::{self, Align, LayoutOf, Size, FullLayout}; use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc::session::{Session, config}; use rustc::session::{Session, config};
use rustc::util::nodemap::FxHashMap; use rustc::util::nodemap::FxHashMap;
use rustc::util::common::path2cstr; use rustc::util::common::path2cstr;
...@@ -1052,7 +1052,7 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ...@@ -1052,7 +1052,7 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
//=----------------------------------------------------------------------------- //=-----------------------------------------------------------------------------
struct UnionMemberDescriptionFactory<'tcx> { struct UnionMemberDescriptionFactory<'tcx> {
layout: FullLayout<'tcx>, layout: TyLayout<'tcx>,
variant: &'tcx ty::VariantDef, variant: &'tcx ty::VariantDef,
span: Span, span: Span,
} }
...@@ -1119,7 +1119,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ...@@ -1119,7 +1119,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
// offset of zero bytes). // offset of zero bytes).
struct EnumMemberDescriptionFactory<'tcx> { struct EnumMemberDescriptionFactory<'tcx> {
enum_type: Ty<'tcx>, enum_type: Ty<'tcx>,
type_rep: FullLayout<'tcx>, type_rep: TyLayout<'tcx>,
discriminant_type_metadata: Option<DIType>, discriminant_type_metadata: Option<DIType>,
containing_scope: DIScope, containing_scope: DIScope,
span: Span, span: Span,
...@@ -1129,7 +1129,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { ...@@ -1129,7 +1129,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
-> Vec<MemberDescription> { -> Vec<MemberDescription> {
let adt = &self.enum_type.ty_adt_def().unwrap(); let adt = &self.enum_type.ty_adt_def().unwrap();
match *self.type_rep.layout { match self.type_rep.layout {
layout::Layout::General { ref variants, .. } => { layout::Layout::General { ref variants, .. } => {
let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
.expect("")); .expect(""));
...@@ -1220,7 +1220,7 @@ fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) ...@@ -1220,7 +1220,7 @@ fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
// of discriminant instead of us having to recover its path. // of discriminant instead of us having to recover its path.
fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
name: &mut String, name: &mut String,
layout: FullLayout<'tcx>, layout: TyLayout<'tcx>,
offset: Size, offset: Size,
size: Size) { size: Size) {
for i in 0..layout.fields.count() { for i in 0..layout.fields.count() {
...@@ -1300,7 +1300,7 @@ enum EnumDiscriminantInfo { ...@@ -1300,7 +1300,7 @@ enum EnumDiscriminantInfo {
// descriptions of the fields of the variant. This is a rudimentary version of a // descriptions of the fields of the variant. This is a rudimentary version of a
// full RecursiveTypeDescription. // full RecursiveTypeDescription.
fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
layout: layout::FullLayout<'tcx>, layout: layout::TyLayout<'tcx>,
variant: &'tcx ty::VariantDef, variant: &'tcx ty::VariantDef,
discriminant_info: EnumDiscriminantInfo, discriminant_info: EnumDiscriminantInfo,
containing_scope: DIScope, containing_scope: DIScope,
...@@ -1431,7 +1431,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ...@@ -1431,7 +1431,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let type_rep = cx.layout_of(enum_type); let type_rep = cx.layout_of(enum_type);
let discriminant_type_metadata = match *type_rep.layout { let discriminant_type_metadata = match type_rep.layout {
layout::Layout::NullablePointer { .. } | layout::Layout::NullablePointer { .. } |
layout::Layout::Univariant { .. } => None, layout::Layout::Univariant { .. } => None,
layout::Layout::General { discr, .. } => Some(discriminant_type_metadata(discr)), layout::Layout::General { discr, .. } => Some(discriminant_type_metadata(discr)),
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
// except according to those terms. // except according to those terms.
use llvm::ValueRef; use llvm::ValueRef;
use abi::FnType;
use callee; use callee;
use common::*; use common::*;
use builder::Builder; use builder::Builder;
...@@ -32,10 +33,13 @@ pub fn from_index(index: usize) -> Self { ...@@ -32,10 +33,13 @@ pub fn from_index(index: usize) -> Self {
VirtualIndex(index as u64 + 3) VirtualIndex(index as u64 + 3)
} }
pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef { pub fn get_fn(self, bcx: &Builder<'a, 'tcx>,
llvtable: ValueRef,
fn_ty: &FnType<'tcx>) -> ValueRef {
// Load the data pointer from the object. // Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", Value(llvtable), self); debug!("get_fn({:?}, {:?})", Value(llvtable), self);
let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to());
let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None); let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
// Vtable loads are invariant // Vtable loads are invariant
bcx.set_invariant_load(ptr); bcx.set_invariant_load(ptr);
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
use rustc::ty; use rustc::ty;
use rustc::ty::layout::LayoutOf; use rustc::ty::layout::LayoutOf;
use common; use common;
use type_of::LayoutLlvmExt;
use super::MirContext; use super::MirContext;
pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
...@@ -31,21 +32,14 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { ...@@ -31,21 +32,14 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() { for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
let ty = mircx.monomorphize(&ty); let ty = mircx.monomorphize(&ty);
debug!("local {} has type {:?}", index, ty); debug!("local {} has type {:?}", index, ty);
if ty.is_scalar() || if mircx.ccx.layout_of(ty).is_llvm_immediate() {
ty.is_box() ||
ty.is_region_ptr() ||
ty.is_simd() ||
mircx.ccx.layout_of(ty).is_zst()
{
// These sorts of types are immediates that we can store // These sorts of types are immediates that we can store
// in an ValueRef without an alloca. // in an ValueRef without an alloca.
assert!(common::type_is_immediate(mircx.ccx, ty) ||
common::type_is_fat_ptr(mircx.ccx, ty));
} else if common::type_is_imm_pair(mircx.ccx, ty) { } else if common::type_is_imm_pair(mircx.ccx, ty) {
// We allow pairs and uses of any of their 2 fields. // We allow pairs and uses of any of their 2 fields.
} else { } else {
// These sorts of types require an alloca. Note that // These sorts of types require an alloca. Note that
// type_is_immediate() may *still* be true, particularly // is_llvm_immediate() may *still* be true, particularly
// for newtypes, but we currently force some types // for newtypes, but we currently force some types
// (e.g. structs) into an alloca unconditionally, just so // (e.g. structs) into an alloca unconditionally, just so
// that we don't have to deal with having two pathways // that we don't have to deal with having two pathways
...@@ -179,9 +173,9 @@ fn visit_local(&mut self, ...@@ -179,9 +173,9 @@ fn visit_local(&mut self,
LvalueContext::StorageLive | LvalueContext::StorageLive |
LvalueContext::StorageDead | LvalueContext::StorageDead |
LvalueContext::Validate | LvalueContext::Validate |
LvalueContext::Inspect |
LvalueContext::Consume => {} LvalueContext::Consume => {}
LvalueContext::Inspect |
LvalueContext::Store | LvalueContext::Store |
LvalueContext::Borrow { .. } | LvalueContext::Borrow { .. } |
LvalueContext::Projection(..) => { LvalueContext::Projection(..) => {
......
...@@ -274,13 +274,22 @@ fn trans_terminator(&mut self, ...@@ -274,13 +274,22 @@ fn trans_terminator(&mut self,
} }
let lvalue = self.trans_lvalue(&bcx, location); let lvalue = self.trans_lvalue(&bcx, location);
let fn_ty = FnType::of_instance(bcx.ccx, &drop_fn); let mut args: &[_] = &[lvalue.llval, lvalue.llextra];
let (drop_fn, need_extra) = match ty.sty { args = &args[..1 + lvalue.has_extra() as usize];
ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra), let (drop_fn, fn_ty) = match ty.sty {
false), ty::TyDynamic(..) => {
_ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra()) let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn);
let sig = common::ty_fn_sig(bcx.ccx, fn_ty);
let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]);
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty)
}
_ => {
(callee::get_fn(bcx.ccx, drop_fn),
FnType::of_instance(bcx.ccx, &drop_fn))
}
}; };
let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize];
do_call(self, bcx, fn_ty, drop_fn, args, do_call(self, bcx, fn_ty, drop_fn, args,
Some((ReturnDest::Nothing, target)), Some((ReturnDest::Nothing, target)),
unwind); unwind);
...@@ -561,15 +570,13 @@ fn trans_terminator(&mut self, ...@@ -561,15 +570,13 @@ fn trans_terminator(&mut self,
(&args[..], None) (&args[..], None)
}; };
for (idx, arg) in first_args.iter().enumerate() { for (i, arg) in first_args.iter().enumerate() {
let mut op = self.trans_operand(&bcx, arg); let mut op = self.trans_operand(&bcx, arg);
if idx == 0 { if i == 0 {
if let Pair(_, meta) = op.val { if let Pair(_, meta) = op.val {
if let Some(ty::InstanceDef::Virtual(_, idx)) = def { if let Some(ty::InstanceDef::Virtual(_, idx)) = def {
let llmeth = meth::VirtualIndex::from_index(idx) llfn = Some(meth::VirtualIndex::from_index(idx)
.get_fn(&bcx, meta); .get_fn(&bcx, meta, &fn_ty));
let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
llfn = Some(bcx.pointercast(llmeth, llty));
} }
} }
} }
...@@ -582,7 +589,7 @@ fn trans_terminator(&mut self, ...@@ -582,7 +589,7 @@ fn trans_terminator(&mut self,
op.val = Ref(tmp.llval, tmp.alignment); op.val = Ref(tmp.llval, tmp.alignment);
} }
self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[idx]); self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]);
} }
if let Some(tup) = untuple { if let Some(tup) = untuple {
self.trans_arguments_untupled(&bcx, tup, &mut llargs, self.trans_arguments_untupled(&bcx, tup, &mut llargs,
......
...@@ -32,7 +32,7 @@ ...@@ -32,7 +32,7 @@
use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr}; use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
use common::const_to_opt_u128; use common::const_to_opt_u128;
use consts; use consts;
use type_of::{self, LayoutLlvmExt}; use type_of::LayoutLlvmExt;
use type_::Type; use type_::Type;
use value::Value; use value::Value;
...@@ -145,7 +145,7 @@ pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> { ...@@ -145,7 +145,7 @@ pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) { let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) {
let (a, b) = self.get_pair(ccx); let (a, b) = self.get_pair(ccx);
OperandValue::Pair(a, b) OperandValue::Pair(a, b)
} else if llty == llvalty && common::type_is_immediate(ccx, self.ty) { } else if llty == llvalty && ccx.layout_of(self.ty).is_llvm_immediate() {
// If the types match, we can use the value directly. // If the types match, we can use the value directly.
OperandValue::Immediate(self.llval) OperandValue::Immediate(self.llval)
} else { } else {
...@@ -677,11 +677,12 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, ...@@ -677,11 +677,12 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
} }
C_fat_ptr(self.ccx, base, info) C_fat_ptr(self.ccx, base, info)
} }
mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => { mir::CastKind::Misc if self.ccx.layout_of(operand.ty).is_llvm_immediate() => {
debug_assert!(common::type_is_immediate(self.ccx, cast_ty));
let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
let ll_t_out = self.ccx.layout_of(cast_ty).immediate_llvm_type(self.ccx); let cast_layout = self.ccx.layout_of(cast_ty);
assert!(cast_layout.is_llvm_immediate());
let ll_t_out = cast_layout.immediate_llvm_type(self.ccx);
let llval = operand.llval; let llval = operand.llval;
let signed = match self.ccx.layout_of(operand.ty).abi { let signed = match self.ccx.layout_of(operand.ty).abi {
layout::Abi::Scalar(layout::Int(_, signed)) => signed, layout::Abi::Scalar(layout::Int(_, signed)) => signed,
...@@ -728,8 +729,10 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>, ...@@ -728,8 +729,10 @@ fn const_rvalue(&self, rvalue: &mir::Rvalue<'tcx>,
if common::type_is_fat_ptr(self.ccx, operand.ty) { if common::type_is_fat_ptr(self.ccx, operand.ty) {
let (data_ptr, meta) = operand.get_fat_ptr(self.ccx); let (data_ptr, meta) = operand.get_fat_ptr(self.ccx);
if common::type_is_fat_ptr(self.ccx, cast_ty) { if common::type_is_fat_ptr(self.ccx, cast_ty) {
let llcast_ty = type_of::fat_ptr_base_ty(self.ccx, cast_ty); let thin_ptr = self.ccx.layout_of(cast_ty)
let data_cast = consts::ptrcast(data_ptr, llcast_ty); .field(self.ccx, abi::FAT_PTR_ADDR);
let data_cast = consts::ptrcast(data_ptr,
thin_ptr.llvm_type(self.ccx));
C_fat_ptr(self.ccx, data_cast, meta) C_fat_ptr(self.ccx, data_cast, meta)
} else { // cast to thin-ptr } else { // cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
...@@ -1091,7 +1094,7 @@ fn trans_const_adt<'a, 'tcx>( ...@@ -1091,7 +1094,7 @@ fn trans_const_adt<'a, 'tcx>(
mir::AggregateKind::Adt(_, index, _, _) => index, mir::AggregateKind::Adt(_, index, _, _) => index,
_ => 0, _ => 0,
}; };
match *l.layout { match l.layout {
layout::Layout::General { .. } => { layout::Layout::General { .. } => {
let discr = match *kind { let discr = match *kind {
mir::AggregateKind::Adt(adt_def, _, _, _) => { mir::AggregateKind::Adt(adt_def, _, _, _) => {
...@@ -1147,7 +1150,7 @@ fn trans_const_adt<'a, 'tcx>( ...@@ -1147,7 +1150,7 @@ fn trans_const_adt<'a, 'tcx>(
/// a two-element struct will locate it at offset 4, and accesses to it /// a two-element struct will locate it at offset 4, and accesses to it
/// will read the wrong memory. /// will read the wrong memory.
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: layout::FullLayout<'tcx>, layout: layout::TyLayout<'tcx>,
vals: &[Const<'tcx>], vals: &[Const<'tcx>],
discr: Option<Const<'tcx>>) discr: Option<Const<'tcx>>)
-> Const<'tcx> { -> Const<'tcx> {
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
use llvm::{self, ValueRef}; use llvm::{self, ValueRef};
use rustc::ty::{self, Ty}; use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, FullLayout, LayoutOf}; use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
use rustc::mir; use rustc::mir;
use rustc::mir::tcx::LvalueTy; use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::indexed_vec::Idx;
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
use builder::Builder; use builder::Builder;
use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty}; use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty};
use consts; use consts;
use type_of::{self, LayoutLlvmExt}; use type_of::LayoutLlvmExt;
use type_::Type; use type_::Type;
use value::Value; use value::Value;
use glue; use glue;
...@@ -54,8 +54,8 @@ fn bitor(self, rhs: Self) -> Self { ...@@ -54,8 +54,8 @@ fn bitor(self, rhs: Self) -> Self {
} }
} }
impl<'a> From<FullLayout<'a>> for Alignment { impl<'a> From<TyLayout<'a>> for Alignment {
fn from(layout: FullLayout) -> Self { fn from(layout: TyLayout) -> Self {
if let layout::Abi::Aggregate { packed: true, align, .. } = layout.abi { if let layout::Abi::Aggregate { packed: true, align, .. } = layout.abi {
Alignment::Packed(align) Alignment::Packed(align)
} else { } else {
...@@ -86,7 +86,7 @@ pub struct LvalueRef<'tcx> { ...@@ -86,7 +86,7 @@ pub struct LvalueRef<'tcx> {
pub llextra: ValueRef, pub llextra: ValueRef,
/// Monomorphized type of this lvalue, including variant information /// Monomorphized type of this lvalue, including variant information
pub layout: FullLayout<'tcx>, pub layout: TyLayout<'tcx>,
/// Whether this lvalue is known to be aligned according to its layout /// Whether this lvalue is known to be aligned according to its layout
pub alignment: Alignment, pub alignment: Alignment,
...@@ -94,7 +94,7 @@ pub struct LvalueRef<'tcx> { ...@@ -94,7 +94,7 @@ pub struct LvalueRef<'tcx> {
impl<'a, 'tcx> LvalueRef<'tcx> { impl<'a, 'tcx> LvalueRef<'tcx> {
pub fn new_sized(llval: ValueRef, pub fn new_sized(llval: ValueRef,
layout: FullLayout<'tcx>, layout: TyLayout<'tcx>,
alignment: Alignment) alignment: Alignment)
-> LvalueRef<'tcx> { -> LvalueRef<'tcx> {
LvalueRef { LvalueRef {
...@@ -105,7 +105,7 @@ pub fn new_sized(llval: ValueRef, ...@@ -105,7 +105,7 @@ pub fn new_sized(llval: ValueRef,
} }
} }
pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str) pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
-> LvalueRef<'tcx> { -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, layout); debug!("alloca({:?}: {:?})", name, layout);
let tmp = bcx.alloca( let tmp = bcx.alloca(
...@@ -114,7 +114,7 @@ pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str) ...@@ -114,7 +114,7 @@ pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str)
} }
pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
if let layout::FieldPlacement::Array { count, .. } = *self.layout.fields { if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
if self.layout.is_unsized() { if self.layout.is_unsized() {
assert!(self.has_extra()); assert!(self.has_extra());
assert_eq!(count, 0); assert_eq!(count, 0);
...@@ -163,7 +163,7 @@ pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> { ...@@ -163,7 +163,7 @@ pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
OperandValue::Pair( OperandValue::Pair(
self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(),
self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate())
} else if common::type_is_immediate(bcx.ccx, self.layout.ty) { } else if self.layout.is_llvm_immediate() {
let mut const_llval = ptr::null_mut(); let mut const_llval = ptr::null_mut();
unsafe { unsafe {
let global = llvm::LLVMIsAGlobalVariable(self.llval); let global = llvm::LLVMIsAGlobalVariable(self.llval);
...@@ -202,28 +202,15 @@ pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx ...@@ -202,28 +202,15 @@ pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx
let ccx = bcx.ccx; let ccx = bcx.ccx;
let field = self.layout.field(ccx, ix); let field = self.layout.field(ccx, ix);
let offset = self.layout.fields.offset(ix).bytes(); let offset = self.layout.fields.offset(ix).bytes();
let alignment = self.alignment | Alignment::from(self.layout); let alignment = self.alignment | Alignment::from(self.layout);
// Unions and newtypes only use an offset of 0.
let has_llvm_fields = match *self.layout.fields {
layout::FieldPlacement::Union(_) => false,
layout::FieldPlacement::Array { .. } => true,
layout::FieldPlacement::Arbitrary { .. } => {
match self.layout.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false,
layout::Abi::Aggregate { .. } => true
}
}
};
let simple = || { let simple = || {
LvalueRef { LvalueRef {
llval: if has_llvm_fields { // Unions and newtypes only use an offset of 0.
bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) llval: if offset == 0 {
} else {
assert_eq!(offset, 0);
bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to()) bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to())
} else {
bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
}, },
llextra: if ccx.shared().type_has_metadata(field.ty) { llextra: if ccx.shared().type_has_metadata(field.ty) {
self.llextra self.llextra
...@@ -309,7 +296,7 @@ pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx ...@@ -309,7 +296,7 @@ pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx
/// Obtain the actual discriminant of a value. /// Obtain the actual discriminant of a value.
pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx); let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx);
match *self.layout.layout { match self.layout.layout {
layout::Layout::Univariant { .. } | layout::Layout::Univariant { .. } |
layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0),
_ => {} _ => {}
...@@ -320,7 +307,7 @@ pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> Valu ...@@ -320,7 +307,7 @@ pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> Valu
layout::Abi::Scalar(discr) => discr, layout::Abi::Scalar(discr) => discr,
_ => bug!("discriminant not scalar: {:#?}", discr.layout) _ => bug!("discriminant not scalar: {:#?}", discr.layout)
}; };
let (min, max) = match *self.layout.layout { let (min, max) = match self.layout.layout {
layout::Layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end), layout::Layout::General { ref discr_range, .. } => (discr_range.start, discr_range.end),
_ => (0, u64::max_value()), _ => (0, u64::max_value()),
}; };
...@@ -346,7 +333,7 @@ pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> Valu ...@@ -346,7 +333,7 @@ pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> Valu
bcx.load(discr.llval, discr.alignment.non_abi()) bcx.load(discr.llval, discr.alignment.non_abi())
} }
}; };
match *self.layout.layout { match self.layout.layout {
layout::Layout::General { .. } => { layout::Layout::General { .. } => {
let signed = match discr_scalar { let signed = match discr_scalar {
layout::Int(_, signed) => signed, layout::Int(_, signed) => signed,
...@@ -369,7 +356,7 @@ pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) { ...@@ -369,7 +356,7 @@ pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
let to = self.layout.ty.ty_adt_def().unwrap() let to = self.layout.ty.ty_adt_def().unwrap()
.discriminant_for_variant(bcx.tcx(), variant_index) .discriminant_for_variant(bcx.tcx(), variant_index)
.to_u128_unchecked() as u64; .to_u128_unchecked() as u64;
match *self.layout.layout { match self.layout.layout {
layout::Layout::General { .. } => { layout::Layout::General { .. } => {
let ptr = self.project_field(bcx, 0); let ptr = self.project_field(bcx, 0);
bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
...@@ -419,17 +406,9 @@ pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) ...@@ -419,17 +406,9 @@ pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
let mut downcast = *self; let mut downcast = *self;
downcast.layout = self.layout.for_variant(variant_index); downcast.layout = self.layout.for_variant(variant_index);
// If this is an enum, cast to the appropriate variant struct type. // Cast to the appropriate variant struct type.
match *self.layout.layout { let variant_ty = downcast.layout.llvm_type(bcx.ccx);
layout::Layout::NullablePointer { .. } | downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
layout::Layout::General { .. } => {
let variant_ty = Type::struct_(bcx.ccx,
&type_of::struct_llfields(bcx.ccx, downcast.layout),
downcast.layout.is_packed());
downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
}
_ => {}
}
downcast downcast
} }
......
...@@ -12,18 +12,17 @@ ...@@ -12,18 +12,17 @@
use llvm::{self, ValueRef, BasicBlockRef}; use llvm::{self, ValueRef, BasicBlockRef};
use llvm::debuginfo::DIScope; use llvm::debuginfo::DIScope;
use rustc::ty::{self, TypeFoldable}; use rustc::ty::{self, TypeFoldable};
use rustc::ty::layout::{LayoutOf, FullLayout}; use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc::mir::{self, Mir}; use rustc::mir::{self, Mir};
use rustc::ty::subst::Substs; use rustc::ty::subst::Substs;
use rustc::infer::TransNormalize; use rustc::infer::TransNormalize;
use rustc::session::config::FullDebugInfo; use rustc::session::config::FullDebugInfo;
use base; use base;
use builder::Builder; use builder::Builder;
use common::{self, CrateContext, Funclet}; use common::{CrateContext, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::Instance; use monomorphize::Instance;
use abi::{ArgAttribute, FnType}; use abi::{ArgAttribute, FnType};
use type_of::{self, LayoutLlvmExt};
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords; use syntax::symbol::keywords;
...@@ -85,7 +84,7 @@ pub struct MirContext<'a, 'tcx:'a> { ...@@ -85,7 +84,7 @@ pub struct MirContext<'a, 'tcx:'a> {
/// directly using an `OperandRef`, which makes for tighter LLVM /// directly using an `OperandRef`, which makes for tighter LLVM
/// IR. The conditions for using an `OperandRef` are as follows: /// IR. The conditions for using an `OperandRef` are as follows:
/// ///
/// - the type of the local must be judged "immediate" by `type_is_immediate` /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
/// - the operand must never be referenced indirectly /// - the operand must never be referenced indirectly
/// - we should not take its address using the `&` operator /// - we should not take its address using the `&` operator
/// - nor should it appear in an lvalue path like `tmp.a` /// - nor should it appear in an lvalue path like `tmp.a`
...@@ -177,7 +176,7 @@ enum LocalRef<'tcx> { ...@@ -177,7 +176,7 @@ enum LocalRef<'tcx> {
} }
impl<'a, 'tcx> LocalRef<'tcx> { impl<'a, 'tcx> LocalRef<'tcx> {
fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> LocalRef<'tcx> { fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> {
if layout.is_zst() { if layout.is_zst() {
// Zero-size temporaries aren't always initialized, which // Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but // doesn't matter because they don't contain data, but
...@@ -448,32 +447,14 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ...@@ -448,32 +447,14 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none()); assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none());
assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none()); assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none());
let mut a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(a, &(name.clone() + ".0"));
llarg_idx += 1; llarg_idx += 1;
let mut b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
bcx.set_value_name(b, &(name + ".1"));
llarg_idx += 1; llarg_idx += 1;
if common::type_is_fat_ptr(bcx.ccx, arg.layout.ty) {
// FIXME(eddyb) As we can't perfectly represent the data and/or
// vtable pointer in a fat pointers in Rust's typesystem, and
// because we split fat pointers into two ArgType's, they're
// not the right type so we have to cast them for now.
let pointee = match arg.layout.ty.sty {
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty,
ty::TyAdt(def, _) if def.is_box() => arg.layout.ty.boxed_ty(),
_ => bug!()
};
let data_llty = bcx.ccx.layout_of(pointee).llvm_type(bcx.ccx);
let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee);
a = bcx.pointercast(a, data_llty.ptr_to());
bcx.set_value_name(a, &(name.clone() + ".ptr"));
b = bcx.pointercast(b, meta_llty);
bcx.set_value_name(b, &(name + ".meta"));
}
return LocalRef::Operand(Some(OperandRef { return LocalRef::Operand(Some(OperandRef {
val: OperandValue::Pair(a, b), val: OperandValue::Pair(a, b),
layout: arg.layout layout: arg.layout
......
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
use llvm::ValueRef; use llvm::ValueRef;
use rustc::ty; use rustc::ty;
use rustc::ty::layout::{LayoutOf, FullLayout}; use rustc::ty::layout::{LayoutOf, TyLayout};
use rustc::mir; use rustc::mir;
use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::indexed_vec::Idx;
...@@ -71,7 +71,7 @@ pub struct OperandRef<'tcx> { ...@@ -71,7 +71,7 @@ pub struct OperandRef<'tcx> {
pub val: OperandValue, pub val: OperandValue,
// The layout of value, based on its Rust type. // The layout of value, based on its Rust type.
pub layout: FullLayout<'tcx>, pub layout: TyLayout<'tcx>,
} }
impl<'tcx> fmt::Debug for OperandRef<'tcx> { impl<'tcx> fmt::Debug for OperandRef<'tcx> {
...@@ -82,7 +82,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { ...@@ -82,7 +82,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
impl<'a, 'tcx> OperandRef<'tcx> { impl<'a, 'tcx> OperandRef<'tcx> {
pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, pub fn new_zst(ccx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>) -> OperandRef<'tcx> { layout: TyLayout<'tcx>) -> OperandRef<'tcx> {
assert!(layout.is_zst()); assert!(layout.is_zst());
let llty = layout.llvm_type(ccx); let llty = layout.llvm_type(ccx);
// FIXME(eddyb) ZSTs should always be immediate, not pairs. // FIXME(eddyb) ZSTs should always be immediate, not pairs.
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
use rustc_const_math::MAX_F32_PLUS_HALF_ULP; use rustc_const_math::MAX_F32_PLUS_HALF_ULP;
use std::{u128, i128}; use std::{u128, i128};
use abi;
use base; use base;
use builder::Builder; use builder::Builder;
use callee; use callee;
...@@ -26,7 +27,7 @@ ...@@ -26,7 +27,7 @@
use consts; use consts;
use monomorphize; use monomorphize;
use type_::Type; use type_::Type;
use type_of::{self, LayoutLlvmExt}; use type_of::LayoutLlvmExt;
use value::Value; use value::Value;
use super::{MirContext, LocalRef}; use super::{MirContext, LocalRef};
...@@ -234,8 +235,8 @@ pub fn trans_rvalue_operand(&mut self, ...@@ -234,8 +235,8 @@ pub fn trans_rvalue_operand(&mut self,
// &'a fmt::Debug+Send => &'a fmt::Debug, // &'a fmt::Debug+Send => &'a fmt::Debug,
// So we need to pointercast the base to ensure // So we need to pointercast the base to ensure
// the types match up. // the types match up.
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR);
let lldata = bcx.pointercast(lldata, llcast_ty); let lldata = bcx.pointercast(lldata, thin_ptr.llvm_type(bcx.ccx));
OperandValue::Pair(lldata, llextra) OperandValue::Pair(lldata, llextra)
} }
OperandValue::Immediate(lldata) => { OperandValue::Immediate(lldata) => {
...@@ -253,8 +254,9 @@ pub fn trans_rvalue_operand(&mut self, ...@@ -253,8 +254,9 @@ pub fn trans_rvalue_operand(&mut self,
mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => { mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => {
if let OperandValue::Pair(data_ptr, meta) = operand.val { if let OperandValue::Pair(data_ptr, meta) = operand.val {
if common::type_is_fat_ptr(bcx.ccx, cast.ty) { if common::type_is_fat_ptr(bcx.ccx, cast.ty) {
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR);
let data_cast = bcx.pointercast(data_ptr, llcast_ty); let data_cast = bcx.pointercast(data_ptr,
thin_ptr.llvm_type(bcx.ccx));
OperandValue::Pair(data_cast, meta) OperandValue::Pair(data_cast, meta)
} else { // cast to thin-ptr } else { // cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
...@@ -268,7 +270,7 @@ pub fn trans_rvalue_operand(&mut self, ...@@ -268,7 +270,7 @@ pub fn trans_rvalue_operand(&mut self,
} }
} }
mir::CastKind::Misc => { mir::CastKind::Misc => {
debug_assert!(common::type_is_immediate(bcx.ccx, cast.ty)); assert!(cast.is_llvm_immediate());
let r_t_in = CastTy::from_ty(operand.layout.ty) let r_t_in = CastTy::from_ty(operand.layout.ty)
.expect("bad input type for cast"); .expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
...@@ -276,7 +278,7 @@ pub fn trans_rvalue_operand(&mut self, ...@@ -276,7 +278,7 @@ pub fn trans_rvalue_operand(&mut self,
let ll_t_out = cast.immediate_llvm_type(bcx.ccx); let ll_t_out = cast.immediate_llvm_type(bcx.ccx);
let llval = operand.immediate(); let llval = operand.immediate();
if let Layout::General { ref discr_range, .. } = *operand.layout.layout { if let Layout::General { ref discr_range, .. } = operand.layout.layout {
if discr_range.end > discr_range.start { if discr_range.end > discr_range.start {
// We want `table[e as usize]` to not // We want `table[e as usize]` to not
// have bound checks, and this is the most // have bound checks, and this is the most
......
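The hunk above covers `mir::CastKind::Misc` on pairs and immediates: a fat-pointer-to-fat-pointer cast keeps the metadata and only pointer-casts the data half (now taken from `cast.field(bcx.ccx, abi::FAT_PTR_ADDR)`), a fat-to-thin cast extracts just the data pointer, and an enum-to-integer cast can lean on the discriminant range. At the language level the same distinctions look roughly like this (stable Rust, illustrative only):

fn main() {
    let bytes = [1u8, 2, 3, 4];
    let fat: &[u8] = &bytes;              // fat pointer: (data pointer, length)

    // Fat -> fat: the length metadata is carried along unchanged.
    let raw_fat: *const [u8] = fat;
    assert_eq!(unsafe { (*raw_fat).len() }, 4);

    // Fat -> thin: only the data pointer survives the cast.
    let thin: *const u8 = fat.as_ptr();
    assert_eq!(unsafe { *thin }, 1);

    // Enum -> integer: the result always lies inside the discriminant range,
    // which is the property the hunk exposes so that an indexed lookup like
    // this can have its bounds check optimized away.
    #[allow(dead_code)]
    enum Color { Red = 0, Green = 1, Blue = 2 }
    let table = ["red", "green", "blue"];
    let e = Color::Blue;
    assert_eq!(table[e as usize], "blue");
}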
...@@ -207,10 +207,6 @@ pub fn vector(ty: &Type, len: u64) -> Type { ...@@ -207,10 +207,6 @@ pub fn vector(ty: &Type, len: u64) -> Type {
ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint)) ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint))
} }
pub fn vtable_ptr(ccx: &CrateContext) -> Type {
Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to()
}
pub fn kind(&self) -> TypeKind { pub fn kind(&self) -> TypeKind {
unsafe { unsafe {
llvm::LLVMRustGetTypeKind(self.to_ref()) llvm::LLVMRustGetTypeKind(self.to_ref())
......
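The deleted `vtable_ptr` helper hard-coded an LLVM `void (i8*)**` for the vtable half of a trait-object pointer; with the layout-driven path that type now falls out of the fat pointer's own layout. The underlying fact is simply that trait-object and slice references are two words wide on a current toolchain (illustrative check, not from the patch):

use std::fmt::Debug;
use std::mem::size_of;

fn main() {
    // Trait-object reference: data pointer + vtable pointer.
    assert_eq!(size_of::<&dyn Debug>(), 2 * size_of::<usize>());
    // Slice reference: data pointer + element count.
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
    // Box of a trait object is fat for the same reason.
    assert_eq!(size_of::<Box<dyn Debug>>(), 2 * size_of::<usize>());
    // A reference to a Sized type stays a single word.
    assert_eq!(size_of::<&u32>(), size_of::<usize>());
}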
...@@ -11,131 +11,68 @@ ...@@ -11,131 +11,68 @@
use abi::FnType; use abi::FnType;
use common::*; use common::*;
use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, FullLayout}; use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, TyLayout};
use trans_item::DefPathBasedNames; use trans_item::DefPathBasedNames;
use type_::Type; use type_::Type;
use syntax::ast; use std::fmt::Write;
pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
match ty.sty { layout: TyLayout<'tcx>,
ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | defer: &mut Option<(Type, TyLayout<'tcx>)>)
ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if ccx.shared().type_has_metadata(t) => {
ccx.layout_of(t).llvm_type(ccx).ptr_to()
}
ty::TyAdt(def, _) if def.is_box() => {
ccx.layout_of(ty.boxed_ty()).llvm_type(ccx).ptr_to()
}
_ => bug!("expected fat ptr ty but got {:?}", ty)
}
}
pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
let unsized_part = ccx.tcx().struct_tail(ty);
match unsized_part.sty {
ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
Type::uint_from_ty(ccx, ast::UintTy::Us)
}
ty::TyDynamic(..) => Type::vtable_ptr(ccx),
_ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}",
unsized_part, ty)
}
}
fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>,
defer: &mut Option<(Type, FullLayout<'tcx>)>)
-> Type { -> Type {
let ptr_ty = |ty: Ty<'tcx>| { match layout.abi {
if cx.shared().type_has_metadata(ty) { layout::Abi::Scalar(_) => bug!("handled elsewhere"),
if let ty::TyStr = ty.sty { layout::Abi::Vector { .. } => {
// This means we get a nicer name in the output (str is always return Type::vector(&layout.field(ccx, 0).llvm_type(ccx),
// unsized). layout.fields.count() as u64);
cx.str_slice_type()
} else {
let ptr_ty = cx.layout_of(ty).llvm_type(cx).ptr_to();
let info_ty = unsized_info_ty(cx, ty);
Type::struct_(cx, &[
Type::array(&Type::i8(cx), 0),
ptr_ty,
Type::array(&Type::i8(cx), 0),
info_ty,
Type::array(&Type::i8(cx), 0)
], false)
}
} else {
cx.layout_of(ty).llvm_type(cx).ptr_to()
}
};
match ty.sty {
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
return ptr_ty(ty);
} }
ty::TyAdt(def, _) if def.is_box() => { layout::Abi::Aggregate { .. } => {}
return ptr_ty(ty.boxed_ty());
}
ty::TyFnPtr(sig) => {
let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig);
return FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to();
}
_ => {}
} }
let layout = cx.layout_of(ty); let name = match layout.ty.sty {
if let layout::Abi::Scalar(value) = layout.abi { ty::TyClosure(..) |
let llty = match value { ty::TyGenerator(..) |
layout::Int(layout::I1, _) => Type::i8(cx), ty::TyAdt(..) |
layout::Int(i, _) => Type::from_integer(cx, i), ty::TyDynamic(..) |
layout::F32 => Type::f32(cx), ty::TyForeign(..) |
layout::F64 => Type::f64(cx), ty::TyStr => {
layout::Pointer => {
cx.layout_of(layout::Pointer.to_ty(cx.tcx())).llvm_type(cx)
}
};
return llty;
}
if let layout::Abi::Vector { .. } = layout.abi {
return Type::vector(&layout.field(cx, 0).llvm_type(cx),
layout.fields.count() as u64);
}
let name = match ty.sty {
ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAdt(..) => {
let mut name = String::with_capacity(32); let mut name = String::with_capacity(32);
let printer = DefPathBasedNames::new(cx.tcx(), true, true); let printer = DefPathBasedNames::new(ccx.tcx(), true, true);
printer.push_type_name(ty, &mut name); printer.push_type_name(layout.ty, &mut name);
if let (&ty::TyAdt(def, _), Some(v)) = (&layout.ty.sty, layout.variant_index) {
write!(&mut name, "::{}", def.variants[v].name).unwrap();
}
Some(name) Some(name)
} }
_ => None _ => None
}; };
match *layout.fields { match layout.fields {
layout::FieldPlacement::Union(_) => { layout::FieldPlacement::Union(_) => {
let size = layout.size(cx).bytes(); let size = layout.size(ccx).bytes();
let fill = Type::array(&Type::i8(cx), size); let fill = Type::array(&Type::i8(ccx), size);
match name { match name {
None => { None => {
Type::struct_(cx, &[fill], layout.is_packed()) Type::struct_(ccx, &[fill], layout.is_packed())
} }
Some(ref name) => { Some(ref name) => {
let mut llty = Type::named_struct(cx, name); let mut llty = Type::named_struct(ccx, name);
llty.set_struct_body(&[fill], layout.is_packed()); llty.set_struct_body(&[fill], layout.is_packed());
llty llty
} }
} }
} }
layout::FieldPlacement::Array { count, .. } => { layout::FieldPlacement::Array { count, .. } => {
Type::array(&layout.field(cx, 0).llvm_type(cx), count) Type::array(&layout.field(ccx, 0).llvm_type(ccx), count)
} }
layout::FieldPlacement::Arbitrary { .. } => { layout::FieldPlacement::Arbitrary { .. } => {
match name { match name {
None => { None => {
Type::struct_(cx, &struct_llfields(cx, layout), layout.is_packed()) Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed())
} }
Some(ref name) => { Some(ref name) => {
let llty = Type::named_struct(cx, name); let llty = Type::named_struct(ccx, name);
*defer = Some((llty, layout)); *defer = Some((llty, layout));
llty llty
} }
...@@ -144,37 +81,37 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ...@@ -144,37 +81,37 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
} }
} }
pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: FullLayout<'tcx>) -> Vec<Type> { layout: TyLayout<'tcx>) -> Vec<Type> {
debug!("struct_llfields: {:#?}", layout); debug!("struct_llfields: {:#?}", layout);
let align = layout.align(cx); let align = layout.align(ccx);
let size = layout.size(cx); let size = layout.size(ccx);
let field_count = layout.fields.count(); let field_count = layout.fields.count();
let mut offset = Size::from_bytes(0); let mut offset = Size::from_bytes(0);
let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2); let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() { for i in layout.fields.index_by_increasing_offset() {
let field = layout.field(cx, i); let field = layout.field(ccx, i);
let target_offset = layout.fields.offset(i as usize); let target_offset = layout.fields.offset(i as usize);
debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}",
i, field, offset, target_offset); i, field, offset, target_offset);
assert!(target_offset >= offset); assert!(target_offset >= offset);
let padding = target_offset - offset; let padding = target_offset - offset;
result.push(Type::array(&Type::i8(cx), padding.bytes())); result.push(Type::array(&Type::i8(ccx), padding.bytes()));
debug!(" padding before: {:?}", padding); debug!(" padding before: {:?}", padding);
result.push(field.llvm_type(cx)); result.push(field.llvm_type(ccx));
if layout.is_packed() { if layout.is_packed() {
assert_eq!(padding.bytes(), 0); assert_eq!(padding.bytes(), 0);
} else { } else {
let field_align = field.align(cx); let field_align = field.align(ccx);
assert!(field_align.abi() <= align.abi(), assert!(field_align.abi() <= align.abi(),
"non-packed type has field with larger align ({}): {:#?}", "non-packed type has field with larger align ({}): {:#?}",
field_align.abi(), layout); field_align.abi(), layout);
} }
offset = target_offset + field.size(cx); offset = target_offset + field.size(ccx);
} }
if !layout.is_unsized() && field_count > 0 { if !layout.is_unsized() && field_count > 0 {
if offset > size { if offset > size {
...@@ -184,7 +121,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ...@@ -184,7 +121,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let padding = size - offset; let padding = size - offset;
debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, size); padding, offset, size);
result.push(Type::array(&Type::i8(cx), padding.bytes())); result.push(Type::array(&Type::i8(ccx), padding.bytes()));
assert!(result.len() == 1 + field_count * 2); assert!(result.len() == 1 + field_count * 2);
} else { } else {
debug!("struct_llfields: offset: {:?} stride: {:?}", debug!("struct_llfields: offset: {:?} stride: {:?}",
...@@ -210,13 +147,22 @@ pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { ...@@ -210,13 +147,22 @@ pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
} }
pub trait LayoutLlvmExt<'tcx> { pub trait LayoutLlvmExt<'tcx> {
fn is_llvm_immediate(&self) -> bool;
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn over_align(&self, ccx: &CrateContext) -> Option<Align>; fn over_align(&self, ccx: &CrateContext) -> Option<Align>;
fn llvm_field_index(&self, index: usize) -> u64; fn llvm_field_index(&self, index: usize) -> u64;
} }
impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
fn is_llvm_immediate(&self) -> bool {
match self.abi {
layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true,
layout::Abi::Aggregate { .. } => self.is_zst()
}
}
/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
/// The pointee type of the pointer in `LvalueRef` is always this type. /// The pointee type of the pointer in `LvalueRef` is always this type.
/// For sized types, it is also the right LLVM type for an `alloca` /// For sized types, it is also the right LLVM type for an `alloca`
...@@ -229,8 +175,42 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { ...@@ -229,8 +175,42 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> {
/// of that field's type - this is useful for taking the address of /// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment. /// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
if let layout::Abi::Scalar(value) = self.abi {
// Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers).
if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) {
return llty;
}
let llty = match value {
layout::Int(layout::I1, _) => Type::i8(ccx),
layout::Int(i, _) => Type::from_integer(ccx, i),
layout::F32 => Type::f32(ccx),
layout::F64 => Type::f64(ccx),
layout::Pointer => {
let pointee = match self.ty.sty {
ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
ccx.layout_of(ty).llvm_type(ccx)
}
ty::TyAdt(def, _) if def.is_box() => {
ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx)
}
ty::TyFnPtr(sig) => {
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
FnType::new(ccx, sig, &[]).llvm_type(ccx)
}
_ => Type::i8(ccx)
};
pointee.ptr_to()
}
};
ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty);
return llty;
}
// Check the cache. // Check the cache.
if let Some(&llty) = ccx.lltypes().borrow().get(&self.ty) { if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, self.variant_index)) {
return llty; return llty;
} }
...@@ -244,13 +224,17 @@ fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { ...@@ -244,13 +224,17 @@ fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
let mut defer = None; let mut defer = None;
let llty = if self.ty != normal_ty { let llty = if self.ty != normal_ty {
ccx.layout_of(normal_ty).llvm_type(ccx) let mut layout = ccx.layout_of(normal_ty);
if let Some(v) = self.variant_index {
layout = layout.for_variant(v);
}
layout.llvm_type(ccx)
} else { } else {
uncached_llvm_type(ccx, self.ty, &mut defer) uncached_llvm_type(ccx, *self, &mut defer)
}; };
debug!("--> mapped {:#?} to llty={:?}", self, llty); debug!("--> mapped {:#?} to llty={:?}", self, llty);
ccx.lltypes().borrow_mut().insert(self.ty, llty); ccx.lltypes().borrow_mut().insert((self.ty, self.variant_index), llty);
if let Some((mut llty, layout)) = defer { if let Some((mut llty, layout)) = defer {
llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed()) llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed())
...@@ -279,11 +263,11 @@ fn over_align(&self, ccx: &CrateContext) -> Option<Align> { ...@@ -279,11 +263,11 @@ fn over_align(&self, ccx: &CrateContext) -> Option<Align> {
fn llvm_field_index(&self, index: usize) -> u64 { fn llvm_field_index(&self, index: usize) -> u64 {
if let layout::Abi::Scalar(_) = self.abi { if let layout::Abi::Scalar(_) = self.abi {
bug!("FullLayout::llvm_field_index({:?}): not applicable", self); bug!("TyLayout::llvm_field_index({:?}): not applicable", self);
} }
match *self.fields { match self.fields {
layout::FieldPlacement::Union(_) => { layout::FieldPlacement::Union(_) => {
bug!("FullLayout::llvm_field_index({:?}): not applicable", self) bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
} }
layout::FieldPlacement::Array { .. } => { layout::FieldPlacement::Array { .. } => {
......
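`struct_llfields` walks the fields in increasing-offset order and materializes every gap as an explicit `[N x i8]` array, which is why the resulting LLVM struct always has `1 + field_count * 2` members (pad, field, pad, field, ..., trailing pad). The padding arithmetic is plain offset bookkeeping; a small self-contained re-creation (hypothetical helper, not the compiler's code):

// Re-creates the padding bookkeeping of `struct_llfields` for illustration.
// `fields` holds (offset, size) pairs sorted by increasing offset; the result
// is the byte count of the pad emitted before each field, plus the trailing
// pad up to the type's total size.
fn pad_sizes(fields: &[(u64, u64)], total_size: u64) -> Vec<u64> {
    let mut pads = Vec::with_capacity(fields.len() + 1);
    let mut offset = 0;
    for &(target_offset, size) in fields {
        assert!(target_offset >= offset, "fields must not overlap");
        pads.push(target_offset - offset);
        offset = target_offset + size;
    }
    assert!(total_size >= offset);
    pads.push(total_size - offset);
    pads
}

fn main() {
    // A hypothetical layout with 1-, 4- and 1-byte fields at offsets 0, 4, 8
    // and a 12-byte total size: pads of 0, 3, 0 bytes, then 3 trailing bytes.
    assert_eq!(pad_sizes(&[(0, 1), (4, 4), (8, 1)], 12), vec![0, 3, 0, 3]);
}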
...@@ -24,10 +24,9 @@ pub fn helper(_: usize) { ...@@ -24,10 +24,9 @@ pub fn helper(_: usize) {
pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
// We used to generate an extra alloca and memcpy for the block's trailing expression value, so // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
// check that we copy directly to the return value slot // check that we copy directly to the return value slot
// CHECK: %x.ptr = bitcast i8* %0 to [0 x i8]* // CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1
// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.ptr, 1 // CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3
// CHECK: %2 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1, [[USIZE]] %x.meta, 3 // CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1
// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %2
{ x } { x }
} }
......
...@@ -97,43 +97,43 @@ pub fn struct_return() -> S { ...@@ -97,43 +97,43 @@ pub fn struct_return() -> S {
pub fn helper(_: usize) { pub fn helper(_: usize) {
} }
// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) // CHECK: @slice([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1)
// FIXME #25759 This should also have `nocapture` // FIXME #25759 This should also have `nocapture`
#[no_mangle] #[no_mangle]
pub fn slice(_: &[u8]) { pub fn slice(_: &[u8]) {
} }
// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta) // CHECK: @mutable_slice([0 x i8]* nonnull %arg0.0, [[USIZE]] %arg0.1)
// FIXME #25759 This should also have `nocapture` // FIXME #25759 This should also have `nocapture`
// ... there's this LLVM bug that forces us to not use noalias, see #29485 // ... there's this LLVM bug that forces us to not use noalias, see #29485
#[no_mangle] #[no_mangle]
pub fn mutable_slice(_: &mut [u8]) { pub fn mutable_slice(_: &mut [u8]) {
} }
// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta) // CHECK: @unsafe_slice([0 x %UnsafeInner]* nonnull %arg0.0, [[USIZE]] %arg0.1)
// unsafe interior means this isn't actually readonly and there may be aliases ... // unsafe interior means this isn't actually readonly and there may be aliases ...
#[no_mangle] #[no_mangle]
pub fn unsafe_slice(_: &[UnsafeInner]) { pub fn unsafe_slice(_: &[UnsafeInner]) {
} }
// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) // CHECK: @str([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1)
// FIXME #25759 This should also have `nocapture` // FIXME #25759 This should also have `nocapture`
#[no_mangle] #[no_mangle]
pub fn str(_: &[u8]) { pub fn str(_: &[u8]) {
} }
// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly) // CHECK: @trait_borrow(%"core::ops::drop::Drop"* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1)
// FIXME #25759 This should also have `nocapture` // FIXME #25759 This should also have `nocapture`
#[no_mangle] #[no_mangle]
pub fn trait_borrow(_: &Drop) { pub fn trait_borrow(_: &Drop) {
} }
// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly) // CHECK: @trait_box(%"core::ops::drop::Drop"* noalias nonnull, {}* noalias nonnull readonly)
#[no_mangle] #[no_mangle]
pub fn trait_box(_: Box<Drop>) { pub fn trait_box(_: Box<Drop>) {
} }
// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) // CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
#[no_mangle] #[no_mangle]
pub fn return_slice(x: &[u16]) -> &[u16] { pub fn return_slice(x: &[u16]) -> &[u16] {
x x
......
...@@ -24,10 +24,10 @@ pub fn helper(_: usize) { ...@@ -24,10 +24,10 @@ pub fn helper(_: usize) {
pub fn ref_dst(s: &[u8]) { pub fn ref_dst(s: &[u8]) {
// We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
// directly to the alloca for "x" // directly to the alloca for "x"
// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 1 // CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]**
// CHECK: store [0 x i8]* %s.ptr, [0 x i8]** [[X0]] // CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]]
// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3 // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3
// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]] // CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]]
let x = &*s; let x = &*s;
&x; // keep variable in an alloca &x; // keep variable in an alloca
......
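The `ref_dst` test above checks that reborrowing a slice writes the incoming data pointer and length straight into the two live members of `x`'s alloca, with no intermediate copy. In source terms the reborrow changes nothing about the fat pointer's contents (stable Rust, illustrative only):

fn main() {
    let bytes = [1u8, 2, 3];
    let s: &[u8] = &bytes;
    let x = &*s; // reborrow: same data pointer, same length
    assert_eq!(s.as_ptr(), x.as_ptr());
    assert_eq!(s.len(), x.len());
}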