Commit f50fd075 authored by bors

Auto merge of #45225 - eddyb:trans-abi, r=arielb1

Refactor type memory layouts and ABIs, to be more general and easier to optimize.

To combat combinatorial explosion, type layouts are now described through 3 orthogonal properties (see the sketch after this list):
* `Variants` describes the plurality of sum types (where applicable)
  * `Single` is for one inhabited/active variant, including all C `struct`s and `union`s
  * `Tagged` has its variants discriminated by an integer tag, including C `enum`s
  * `NicheFilling` uses otherwise-invalid values ("niches") for all but one of its inhabited variants
* `FieldPlacement` describes the number and memory offsets of fields (if any)
  * `Union` has all its fields at offset `0`
  * `Array` has offsets that are a multiple of its `stride`; guarantees all fields have one type
  * `Arbitrary` records all the field offsets, which can be out-of-order
* `Abi` describes how values of the type should be passed around, including for FFI
  * `Uninhabited` corresponds to no values, associated with unreachable control-flow
  * `Scalar` is ABI-identical to its only integer/floating-point/pointer "scalar component"
  * `ScalarPair` has two "scalar components", but only applies to the Rust ABI
  * `Vector` is for SIMD vectors, typically `#[repr(simd)]` `struct`s in Rust
  * `Aggregate` has arbitrary contents, including all non-transparent C `struct`s and `union`s
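The sketch below gives a compile-checked impression of these three properties as plain Rust enums. It is illustrative only: the names track the `ty::layout` definitions this PR introduces, but the payloads are abbreviated (the real scalar components carry validity ranges, `Arbitrary` also records a memory-order permutation, and so on).

```rust
// Illustrative sketch only; not the exact rustc definitions.
pub enum Variants {
    /// One inhabited/active variant (all C structs and unions).
    Single { index: usize },
    /// Variants discriminated by an integer tag (C enums).
    Tagged { tag_bytes: u64 },
    /// Otherwise-invalid values ("niches") encode the dataless variants.
    NicheFilling { dataful_variant: usize },
}

pub enum FieldPlacement {
    /// Every field lives at offset 0.
    Union { count: usize },
    /// Offsets are multiples of `stride`; all fields share one type.
    Array { stride: u64, count: u64 },
    /// Explicit offsets, possibly out of declaration order.
    Arbitrary { offsets: Vec<u64> },
}

pub enum Abi {
    Uninhabited,
    Scalar,                    // one int/float/pointer component
    ScalarPair,                // two scalar components (Rust ABI only)
    Vector,                    // SIMD vectors
    Aggregate { sized: bool }, // everything else
}
```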

Size optimizations implemented so far (exercised concretely in the snippet after this list):
* ignoring uninhabited variants (i.e. variants containing uninhabited fields), e.g.:
  * `Option<!>` is 0 bytes
  * `Result<T, !>` has the same size as `T`
* using arbitrary niches, not just `0`, to represent a data-less variant, e.g.:
  * `Option<bool>`, `Option<Option<bool>>`, `Option<Ordering>` are all 1 byte
  * `Option<char>` is 4 bytes
* using a range of niches to represent *multiple* data-less variants, e.g.:
  * `enum E { A(bool), B, C, D }` is 1 byte
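These optimizations are directly observable with `mem::size_of`. The checks below pass on any compiler that includes this change; the stable `std::convert::Infallible` stands in for the unstable `!` type:

```rust
use std::convert::Infallible;
use std::mem::size_of;

#[allow(dead_code)]
enum E { A(bool), B, C, D }

fn main() {
    // Uninhabited variants are ignored entirely.
    assert_eq!(size_of::<Option<Infallible>>(), 0);
    assert_eq!(size_of::<Result<u64, Infallible>>(), size_of::<u64>());
    // Arbitrary niches, not just 0, represent the data-less variant.
    assert_eq!(size_of::<Option<bool>>(), 1);         // bool leaves 2..=255 free
    assert_eq!(size_of::<Option<Option<bool>>>(), 1); // nested Nones also fit
    assert_eq!(size_of::<Option<std::cmp::Ordering>>(), 1);
    assert_eq!(size_of::<Option<char>>(), 4);         // niche above char::MAX
    // A range of niches covers several data-less variants at once.
    assert_eq!(size_of::<E>(), 1);
}
```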

Code generation now takes advantage of `Scalar` and `ScalarPair` to pass scalar components around as immediates in more cases, instead of indirectly through pointers into temporary memory, while still avoiding LLVM's "first-class aggregates". There is more untapped potential here.
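As a hedged illustration (the precise classification is computed by the compiler, not guaranteed by the language): `u32`, `bool`, and `&T` have `Scalar` ABI, while fat pointers such as `&[T]` (data pointer plus length) and small tag-plus-payload enums like `Option<u32>` are `ScalarPair`, so both halves can travel in registers:

```rust
// Sketch: with a ScalarPair ABI, `xs` arrives as two immediates
// (data pointer and length) and the returned `Option<u32>` leaves as
// two immediates (discriminant and payload); no stack temporary is
// needed for either under this scheme.
fn first(xs: &[u32]) -> Option<u32> {
    xs.first().copied()
}

fn main() {
    assert_eq!(first(&[7, 8, 9]), Some(7));
    assert_eq!(first(&[]), None);
}
```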

Closes #44426, fixes #5977, fixes #14540, fixes #43278.
......@@ -151,7 +151,7 @@ fn pointer(&mut self) -> *mut T {
unsafe fn finalize<T>(b: IntermediateBox<T>) -> Box<T> {
let p = b.ptr as *mut T;
mem::forget(b);
mem::transmute(p)
Box::from_raw(p)
}
fn make_place<T>() -> IntermediateBox<T> {
......@@ -300,7 +300,10 @@ pub unsafe fn from_raw(raw: *mut T) -> Self {
issue = "27730")]
#[inline]
pub unsafe fn from_unique(u: Unique<T>) -> Self {
mem::transmute(u)
#[cfg(stage0)]
return mem::transmute(u);
#[cfg(not(stage0))]
return Box(u);
}
/// Consumes the `Box`, returning the wrapped raw pointer.
......@@ -362,7 +365,14 @@ pub fn into_raw(b: Box<T>) -> *mut T {
issue = "27730")]
#[inline]
pub fn into_unique(b: Box<T>) -> Unique<T> {
unsafe { mem::transmute(b) }
#[cfg(stage0)]
return unsafe { mem::transmute(b) };
#[cfg(not(stage0))]
return {
let unique = b.0;
mem::forget(b);
unique
};
}
}
......@@ -627,7 +637,7 @@ impl Box<Any + Send> {
pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<Any + Send>> {
<Box<Any>>::downcast(self).map_err(|s| unsafe {
// reapply the Send marker
mem::transmute::<Box<Any>, Box<Any + Send>>(s)
Box::from_raw(Box::into_raw(s) as *mut (Any + Send))
})
}
}
......
......@@ -46,11 +46,13 @@
#![feature(const_fn)]
#![feature(core_intrinsics)]
#![feature(drain_filter)]
#![feature(i128)]
#![feature(i128_type)]
#![feature(match_default_bindings)]
#![feature(inclusive_range)]
#![feature(inclusive_range_syntax)]
#![cfg_attr(windows, feature(libc))]
#![feature(macro_vis_matcher)]
#![feature(match_default_bindings)]
#![feature(never_type)]
#![feature(nonzero)]
#![feature(quote)]
......
......@@ -34,7 +34,8 @@
use rustc_serialize::{Decoder, Decodable, Encoder, Encodable};
use session::{config, early_error, Session};
use traits::Reveal;
use ty::{self, TyCtxt};
use ty::{self, TyCtxt, Ty};
use ty::layout::{LayoutError, LayoutOf, TyLayout};
use util::nodemap::FxHashMap;
use std::default::Default as StdDefault;
......@@ -626,6 +627,14 @@ fn with_param_env<F>(&mut self, id: ast::NodeId, f: F)
}
}
impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a LateContext<'a, 'tcx> {
type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
(self.tcx, self.param_env.reveal_all()).layout_of(ty)
}
}
impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> {
/// Because lints are scoped lexically, we want to walk nested
/// items in the context of the outer item, so enable
......
......@@ -210,7 +210,7 @@ fn resolve_field(&self, field_name: FieldName) -> Option<(&'tcx ty::AdtDef, &'tc
adt_def.variant_with_id(variant_did)
}
_ => {
assert!(adt_def.is_univariant());
assert_eq!(adt_def.variants.len(), 1);
&adt_def.variants[0]
}
};
......@@ -1096,7 +1096,7 @@ pub fn cat_downcast_if_needed<N:ast_node>(&self,
-> cmt<'tcx> {
// univariant enums do not need downcasts
let base_did = self.tcx.parent_def_id(variant_did).unwrap();
if !self.tcx.adt_def(base_did).is_univariant() {
if self.tcx.adt_def(base_did).variants.len() != 1 {
let base_ty = base_cmt.ty;
let ret = Rc::new(cmt_ {
id: node.id(),
......
......@@ -41,7 +41,7 @@
use ty::RegionKind;
use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid};
use ty::TypeVariants::*;
use ty::layout::{Layout, TargetDataLayout};
use ty::layout::{LayoutDetails, TargetDataLayout};
use ty::maps;
use ty::steal::Steal;
use ty::BindingMode;
......@@ -78,7 +78,7 @@
/// Internal storage
pub struct GlobalArenas<'tcx> {
// internings
layout: TypedArena<Layout>,
layout: TypedArena<LayoutDetails>,
// references
generics: TypedArena<ty::Generics>,
......@@ -918,7 +918,7 @@ pub struct GlobalCtxt<'tcx> {
stability_interner: RefCell<FxHashSet<&'tcx attr::Stability>>,
layout_interner: RefCell<FxHashSet<&'tcx Layout>>,
layout_interner: RefCell<FxHashSet<&'tcx LayoutDetails>>,
/// A vector of every trait accessible in the whole crate
/// (i.e. including those from subcrates). This is used only for
......@@ -1016,7 +1016,7 @@ pub fn intern_stability(self, stab: attr::Stability) -> &'gcx attr::Stability {
interned
}
pub fn intern_layout(self, layout: Layout) -> &'gcx Layout {
pub fn intern_layout(self, layout: LayoutDetails) -> &'gcx LayoutDetails {
if let Some(layout) = self.layout_interner.borrow().get(&layout) {
return layout;
}
......
(This file's diff is collapsed.)
......@@ -34,7 +34,6 @@
use traits::Vtable;
use traits::specialization_graph;
use ty::{self, CrateInherentImpls, Ty, TyCtxt};
use ty::layout::{Layout, LayoutError};
use ty::steal::Steal;
use ty::subst::Substs;
use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet};
......@@ -265,7 +264,8 @@
[] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
[] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
[] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
-> Result<&'tcx Layout, LayoutError<'tcx>>,
-> Result<&'tcx ty::layout::LayoutDetails,
ty::layout::LayoutError<'tcx>>,
[] fn dylib_dependency_formats: DylibDepFormats(CrateNum)
-> Rc<Vec<(CrateNum, LinkagePreference)>>,
......
......@@ -1674,11 +1674,6 @@ pub fn all_fields<'s>(&'s self) -> impl Iterator<Item = &'s FieldDef> {
self.variants.iter().flat_map(|v| v.fields.iter())
}
#[inline]
pub fn is_univariant(&self) -> bool {
self.variants.len() == 1
}
pub fn is_payloadfree(&self) -> bool {
!self.variants.is_empty() &&
self.variants.iter().all(|v| v.fields.is_empty())
......@@ -2622,9 +2617,10 @@ fn original_crate_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
pub fn provide(providers: &mut ty::maps::Providers) {
util::provide(providers);
context::provide(providers);
erase_regions::provide(providers);
layout::provide(providers);
util::provide(providers);
*providers = ty::maps::Providers {
associated_item,
associated_item_def_ids,
......
......@@ -19,7 +19,6 @@
use traits::{self, Reveal};
use ty::{self, Ty, TyCtxt, TypeFoldable};
use ty::fold::TypeVisitor;
use ty::layout::{Layout, LayoutError};
use ty::subst::{Subst, Kind};
use ty::TypeVariants::*;
use util::common::ErrorReported;
......@@ -852,30 +851,6 @@ pub fn needs_drop(&'tcx self,
tcx.needs_drop_raw(param_env.and(self))
}
/// Computes the layout of a type. Note that this implicitly
/// executes in "reveal all" mode.
#[inline]
pub fn layout<'lcx>(&'tcx self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>)
-> Result<&'tcx Layout, LayoutError<'tcx>> {
let ty = tcx.erase_regions(&self);
let layout = tcx.layout_raw(param_env.reveal_all().and(ty));
// NB: This recording is normally disabled; when enabled, it
// can however trigger recursive invocations of `layout()`.
// Therefore, we execute it *after* the main query has
// completed, to avoid problems around recursive structures
// and the like. (Admittedly, I wasn't able to reproduce a problem
// here, but it seems like the right thing to do. -nmatsakis)
if let Ok(l) = layout {
Layout::record_layout_for_printing(tcx, ty, param_env, l);
}
layout
}
/// Check whether a type is representable. This means it cannot contain unboxed
/// structural recursion. This check is needed for structs and enums.
pub fn is_representable(&'tcx self,
......@@ -1184,26 +1159,6 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
-> Result<&'tcx Layout, LayoutError<'tcx>>
{
let (param_env, ty) = query.into_parts();
let rec_limit = tcx.sess.recursion_limit.get();
let depth = tcx.layout_depth.get();
if depth > rec_limit {
tcx.sess.fatal(
&format!("overflow representing the type `{}`", ty));
}
tcx.layout_depth.set(depth+1);
let layout = Layout::compute_uncached(tcx, param_env, ty);
tcx.layout_depth.set(depth);
layout
}
pub enum ExplicitSelf<'tcx> {
ByValue,
ByReference(ty::Region<'tcx>, hir::Mutability),
......@@ -1262,7 +1217,6 @@ pub fn provide(providers: &mut ty::maps::Providers) {
is_sized_raw,
is_freeze_raw,
needs_drop_raw,
layout_raw,
..*providers
};
}
......@@ -255,7 +255,7 @@ fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> usize {
match self {
&Variant(vid) => adt.variant_index_with_id(vid),
&Single => {
assert_eq!(adt.variants.len(), 1);
assert!(!adt.is_enum());
0
}
_ => bug!("bad constructor {:?} for adt {:?}", self, adt)
......@@ -356,7 +356,7 @@ fn apply_constructor<'a>(
}).collect();
if let ty::TyAdt(adt, substs) = ty.sty {
if adt.variants.len() > 1 {
if adt.is_enum() {
PatternKind::Variant {
adt_def: adt,
substs,
......@@ -444,7 +444,7 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>,
(0..pcx.max_slice_length+1).map(|length| Slice(length)).collect()
}
}
ty::TyAdt(def, substs) if def.is_enum() && def.variants.len() != 1 => {
ty::TyAdt(def, substs) if def.is_enum() => {
def.variants.iter()
.filter(|v| !cx.is_variant_uninhabited(v, substs))
.map(|v| Variant(v.did))
......
......@@ -17,6 +17,7 @@
use rustc::hir::def::{Def, CtorKind};
use rustc::hir::def_id::DefId;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::LayoutOf;
use rustc::ty::maps::Providers;
use rustc::ty::util::IntTypeExt;
use rustc::ty::subst::{Substs, Subst};
......@@ -313,18 +314,18 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>,
if tcx.fn_sig(def_id).abi() == Abi::RustIntrinsic {
let layout_of = |ty: Ty<'tcx>| {
let ty = tcx.erase_regions(&ty);
tcx.at(e.span).layout_raw(cx.param_env.reveal_all().and(ty)).map_err(|err| {
(tcx.at(e.span), cx.param_env).layout_of(ty).map_err(|err| {
ConstEvalErr { span: e.span, kind: LayoutError(err) }
})
};
match &tcx.item_name(def_id)[..] {
"size_of" => {
let size = layout_of(substs.type_at(0))?.size(tcx).bytes();
let size = layout_of(substs.type_at(0))?.size.bytes();
return Ok(mk_const(Integral(Usize(ConstUsize::new(size,
tcx.sess.target.usize_ty).unwrap()))));
}
"min_align_of" => {
let align = layout_of(substs.type_at(0))?.align(tcx).abi();
let align = layout_of(substs.type_at(0))?.align.abi();
return Ok(mk_const(Integral(Usize(ConstUsize::new(align,
tcx.sess.target.usize_ty).unwrap()))));
}
......
......@@ -150,7 +150,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Some(&adt_def.variants[variant_index])
}
_ => if let ty::TyAdt(adt, _) = self.ty.sty {
if adt.is_univariant() {
if !adt.is_enum() {
Some(&adt.variants[0])
} else {
None
......@@ -598,7 +598,7 @@ fn lower_variant_or_leaf(
Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => {
let enum_id = self.tcx.parent_def_id(variant_id).unwrap();
let adt_def = self.tcx.adt_def(enum_id);
if adt_def.variants.len() > 1 {
if adt_def.is_enum() {
let substs = match ty.sty {
ty::TyAdt(_, substs) |
ty::TyFnDef(_, substs) => substs,
......
......@@ -13,7 +13,7 @@
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::ty::{self, AdtKind, Ty, TyCtxt};
use rustc::ty::layout::{Layout, Primitive};
use rustc::ty::layout::{self, LayoutOf};
use middle::const_val::ConstVal;
use rustc_const_eval::ConstContext;
use util::nodemap::FxHashSet;
......@@ -748,25 +748,23 @@ fn check_item(&mut self, cx: &LateContext, it: &hir::Item) {
// sizes only make sense for non-generic types
let item_def_id = cx.tcx.hir.local_def_id(it.id);
let t = cx.tcx.type_of(item_def_id);
let param_env = cx.param_env.reveal_all();
let ty = cx.tcx.erase_regions(&t);
let layout = ty.layout(cx.tcx, param_env).unwrap_or_else(|e| {
let layout = cx.layout_of(ty).unwrap_or_else(|e| {
bug!("failed to get layout for `{}`: {}", t, e)
});
if let Layout::General { ref variants, ref size, discr, .. } = *layout {
let discr_size = Primitive::Int(discr).size(cx.tcx).bytes();
if let layout::Variants::Tagged { ref variants, ref discr, .. } = layout.variants {
let discr_size = discr.value.size(cx.tcx).bytes();
debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
t, size.bytes(), layout);
t, layout.size.bytes(), layout);
let (largest, slargest, largest_index) = enum_definition.variants
.iter()
.zip(variants)
.map(|(variant, variant_layout)| {
// Subtract the size of the enum discriminant
let bytes = variant_layout.min_size
.bytes()
let bytes = variant_layout.size.bytes()
.saturating_sub(discr_size);
debug!("- variant `{}` is {} bytes large", variant.node.name, bytes);
......
......@@ -575,8 +575,6 @@ pub fn LLVMStructTypeInContext(C: ContextRef,
ElementCount: c_uint,
Packed: Bool)
-> TypeRef;
pub fn LLVMCountStructElementTypes(StructTy: TypeRef) -> c_uint;
pub fn LLVMGetStructElementTypes(StructTy: TypeRef, Dest: *mut TypeRef);
pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool;
// Operations on array, pointer, and vector types (sequence types)
......@@ -585,7 +583,6 @@ pub fn LLVMStructTypeInContext(C: ContextRef,
pub fn LLVMVectorType(ElementType: TypeRef, ElementCount: c_uint) -> TypeRef;
pub fn LLVMGetElementType(Ty: TypeRef) -> TypeRef;
pub fn LLVMGetArrayLength(ArrayTy: TypeRef) -> c_uint;
pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint;
// Operations on other types
......@@ -611,10 +608,7 @@ pub fn LLVMStructTypeInContext(C: ContextRef,
pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef;
pub fn LLVMConstICmp(Pred: IntPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef;
pub fn LLVMConstFCmp(Pred: RealPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef;
// only for isize/vector
pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef;
pub fn LLVMIsNull(Val: ValueRef) -> Bool;
pub fn LLVMIsUndef(Val: ValueRef) -> Bool;
// Operations on metadata
pub fn LLVMMDStringInContext(C: ContextRef, Str: *const c_char, SLen: c_uint) -> ValueRef;
......@@ -736,7 +730,9 @@ pub fn LLVMRustGetOrInsertFunction(M: ModuleRef,
FunctionTy: TypeRef)
-> ValueRef;
pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint);
pub fn LLVMRustAddAlignmentAttr(Fn: ValueRef, index: c_uint, bytes: u32);
pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: ValueRef, index: c_uint, bytes: u64);
pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute);
pub fn LLVMRustAddFunctionAttrStringValue(Fn: ValueRef,
index: c_uint,
......@@ -766,7 +762,11 @@ pub fn LLVMAppendBasicBlockInContext(C: ContextRef,
// Operations on call sites
pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint);
pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute);
pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u32);
pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64);
pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: ValueRef,
index: c_uint,
bytes: u64);
// Operations on load/store instructions (only)
pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool);
......@@ -1205,15 +1205,13 @@ pub fn LLVMBuildPtrDiff(B: BuilderRef,
pub fn LLVMRustBuildAtomicLoad(B: BuilderRef,
PointerVal: ValueRef,
Name: *const c_char,
Order: AtomicOrdering,
Alignment: c_uint)
Order: AtomicOrdering)
-> ValueRef;
pub fn LLVMRustBuildAtomicStore(B: BuilderRef,
Val: ValueRef,
Ptr: ValueRef,
Order: AtomicOrdering,
Alignment: c_uint)
Order: AtomicOrdering)
-> ValueRef;
pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef,
......@@ -1247,23 +1245,6 @@ pub fn LLVMRustBuildAtomicFence(B: BuilderRef,
/// Creates target data from a target layout string.
pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef;
/// Number of bytes clobbered when doing a Store to *T.
pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
/// Distance between successive elements in an array of T. Includes ABI padding.
pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
/// Returns the preferred alignment of a type.
pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint;
/// Returns the minimum alignment of a type.
pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint;
/// Computes the byte offset of the indexed struct element for a
/// target.
pub fn LLVMOffsetOfElement(TD: TargetDataRef,
StructTy: TypeRef,
Element: c_uint)
-> c_ulonglong;
/// Disposes target data.
pub fn LLVMDisposeTargetData(TD: TargetDataRef);
......@@ -1341,11 +1322,6 @@ pub fn LLVMStructSetBody(StructTy: TypeRef,
ElementCount: c_uint,
Packed: Bool);
pub fn LLVMConstNamedStruct(S: TypeRef,
ConstantVals: *const ValueRef,
Count: c_uint)
-> ValueRef;
/// Enables LLVM debug output.
pub fn LLVMRustSetDebug(Enabled: c_int);
......
......@@ -74,22 +74,19 @@ pub fn AddFunctionAttrStringValue(llfn: ValueRef,
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub enum AttributePlace {
ReturnValue,
Argument(u32),
Function,
}
impl AttributePlace {
pub fn ReturnValue() -> Self {
AttributePlace::Argument(0)
}
pub fn as_uint(self) -> c_uint {
match self {
AttributePlace::ReturnValue => 0,
AttributePlace::Argument(i) => 1 + i,
AttributePlace::Function => !0,
AttributePlace::Argument(i) => i,
}
}
}
......
......@@ -98,19 +98,16 @@ fn simplify_match_pair<'pat>(&mut self,
}
PatternKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
if self.hir.tcx().sess.features.borrow().never_type {
let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| {
i == variant_index || {
self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs)
}
});
if irrefutable {
let lvalue = match_pair.lvalue.downcast(adt_def, variant_index);
candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns));
Ok(())
} else {
Err(match_pair)
let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| {
i == variant_index || {
self.hir.tcx().sess.features.borrow().never_type &&
self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs)
}
});
if irrefutable {
let lvalue = match_pair.lvalue.downcast(adt_def, variant_index);
candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns));
Ok(())
} else {
Err(match_pair)
}
......
......@@ -39,7 +39,7 @@ pub fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> {
span: match_pair.pattern.span,
kind: TestKind::Switch {
adt_def: adt_def.clone(),
variants: BitVector::new(self.hir.num_variants(adt_def)),
variants: BitVector::new(adt_def.variants.len()),
},
}
}
......@@ -184,7 +184,7 @@ pub fn perform_test(&mut self,
match test.kind {
TestKind::Switch { adt_def, ref variants } => {
// Variants is a BitVec of indexes into adt_def.variants.
let num_enum_variants = self.hir.num_variants(adt_def);
let num_enum_variants = adt_def.variants.len();
let used_variants = variants.count();
let mut otherwise_block = None;
let mut target_blocks = Vec::with_capacity(num_enum_variants);
......
......@@ -213,10 +213,6 @@ pub fn trait_method(&mut self,
bug!("found no method `{}` in `{:?}`", method_name, trait_def_id);
}
pub fn num_variants(&mut self, adt_def: &ty::AdtDef) -> usize {
adt_def.variants.len()
}
pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: usize) -> Vec<Field> {
(0..adt_def.variants[variant_index].fields.len())
.map(Field::new)
......
......@@ -67,7 +67,7 @@ fn run_pass<'a, 'tcx>(&self,
let ty = variant_def.fields[i].ty(tcx, substs);
let rhs = Rvalue::Use(op.clone());
let lhs_cast = if adt_def.variants.len() > 1 {
let lhs_cast = if adt_def.is_enum() {
Lvalue::Projection(Box::new(LvalueProjection {
base: lhs.clone(),
elem: ProjectionElem::Downcast(adt_def, variant),
......@@ -89,7 +89,7 @@ fn run_pass<'a, 'tcx>(&self,
}
// if the aggregate was an enum, we need to set the discriminant
if adt_def.variants.len() > 1 {
if adt_def.is_enum() {
let set_discriminant = Statement {
kind: StatementKind::SetDiscriminant {
lvalue: lhs.clone(),
......
......@@ -19,6 +19,7 @@
use rustc::mir::*;
use rustc::mir::visit::*;
use rustc::ty::{self, Instance, Ty, TyCtxt, TypeFoldable};
use rustc::ty::layout::LayoutOf;
use rustc::ty::subst::{Subst,Substs};
use std::collections::VecDeque;
......@@ -625,9 +626,7 @@ fn create_temp_if_necessary(
fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>) -> Option<u64> {
ty.layout(tcx, param_env).ok().map(|layout| {
layout.size(&tcx.data_layout).bytes()
})
(tcx, param_env).layout_of(ty).ok().map(|layout| layout.size.bytes())
}
fn subst_and_normalize<'a, 'tcx: 'a>(
......
......@@ -344,7 +344,7 @@ fn field_ty(
variant_index,
} => (&adt_def.variants[variant_index], substs),
LvalueTy::Ty { ty } => match ty.sty {
ty::TyAdt(adt_def, substs) if adt_def.is_univariant() => {
ty::TyAdt(adt_def, substs) if !adt_def.is_enum() => {
(&adt_def.variants[0], substs)
}
ty::TyClosure(def_id, substs) => {
......
......@@ -384,7 +384,7 @@ fn open_drop_for_adt_contents(&mut self, adt: &'tcx ty::AdtDef,
substs: &'tcx Substs<'tcx>)
-> (BasicBlock, Unwind) {
let (succ, unwind) = self.drop_ladder_bottom();
if adt.variants.len() == 1 {
if !adt.is_enum() {
let fields = self.move_paths_for_fields(
self.lvalue,
self.path,
......
(Two file diffs collapsed.)
......@@ -11,16 +11,15 @@
//! # Translation of inline assembly.
use llvm::{self, ValueRef};
use base;
use common::*;
use type_of;
use type_::Type;
use type_of::LayoutLlvmExt;
use builder::Builder;
use rustc::hir;
use rustc::ty::Ty;
use mir::lvalue::Alignment;
use mir::lvalue::LvalueRef;
use mir::operand::OperandValue;
use std::ffi::CString;
use syntax::ast::AsmDialect;
......@@ -30,7 +29,7 @@
pub fn trans_inline_asm<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<(ValueRef, Ty<'tcx>)>,
outputs: Vec<LvalueRef<'tcx>>,
mut inputs: Vec<ValueRef>
) {
let mut ext_constraints = vec![];
......@@ -38,20 +37,15 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
let val = if out.is_rw || out.is_indirect {
Some(base::load_ty(bcx, val, Alignment::Packed, ty))
} else {
None
};
for (i, (out, lvalue)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
inputs.push(val.unwrap());
inputs.push(lvalue.load(bcx).immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
indirect_outputs.push(val.unwrap());
indirect_outputs.push(lvalue.load(bcx).immediate());
} else {
output_types.push(type_of::type_of(bcx.ccx, ty));
output_types.push(lvalue.layout.llvm_type(bcx.ccx));
}
}
if !indirect_outputs.is_empty() {
......@@ -106,9 +100,9 @@ pub fn trans_inline_asm<'a, 'tcx>(
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &(val, _))) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
bcx.store(v, val, None);
for (i, (_, &lvalue)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
OperandValue::Immediate(v).store(bcx, lvalue);
}
// Store mark in a metadata node so we can map LLVM errors
......
(The remaining 53 file diffs are collapsed.)