Commit cdeb4b0d authored by Eduard-Mihai Burtescu

rustc: encode scalar pairs in layout ABI.

Parent f1b7cd99
@@ -757,6 +757,7 @@ pub fn index_by_increasing_offset<'a>(&'a self) -> impl iter::Iterator<Item=usiz
pub enum Abi {
Uninhabited,
Scalar(Scalar),
ScalarPair(Scalar, Scalar),
Vector,
Aggregate {
/// If true, the size is exact, otherwise it's only a lower bound.
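The new ScalarPair variant classifies a type that is ABI-equivalent to exactly two primitive scalars, letting codegen keep it in two SSA values instead of an in-memory aggregate. A minimal sketch of the idea, using simplified stand-ins (these Primitive, Scalar, and Abi definitions are illustrative, not the compiler's real ones):

    #[derive(Debug)]
    enum Primitive { Int, Pointer }

    #[derive(Debug)]
    struct Scalar { value: Primitive, valid_range_start: u128 }

    #[derive(Debug)]
    enum Abi {
        Scalar(Scalar),
        ScalarPair(Scalar, Scalar), // new in this commit
        Aggregate { sized: bool, packed: bool },
    }

    fn main() {
        // A fat pointer such as &[u8] becomes ScalarPair(data_ptr, len);
        // the data pointer's valid range starts at 1, i.e. non-null.
        let fat_ptr = Abi::ScalarPair(
            Scalar { value: Primitive::Pointer, valid_range_start: 1 },
            Scalar { value: Primitive::Int, valid_range_start: 0 },
        );
        println!("{:?}", fat_ptr);
    }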
@@ -769,7 +770,10 @@ impl Abi {
/// Returns true if the layout corresponds to an unsized type.
pub fn is_unsized(&self) -> bool {
match *self {
Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false,
Abi::Uninhabited |
Abi::Scalar(_) |
Abi::ScalarPair(..) |
Abi::Vector => false,
Abi::Aggregate { sized, .. } => !sized
}
}
@@ -777,7 +781,10 @@ pub fn is_unsized(&self) -> bool {
/// Returns true if the fields of the layout are packed.
pub fn is_packed(&self) -> bool {
match *self {
Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false,
Abi::Uninhabited |
Abi::Scalar(_) |
Abi::ScalarPair(..) |
Abi::Vector => false,
Abi::Aggregate { packed, .. } => packed
}
}
@@ -905,13 +912,32 @@ fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-> Result<&'tcx Self, LayoutError<'tcx>> {
let cx = (tcx, param_env);
let dl = cx.data_layout();
let scalar = |value: Primitive| {
let scalar_unit = |value: Primitive| {
let bits = value.size(dl).bits();
assert!(bits <= 128);
tcx.intern_layout(CachedLayout::scalar(cx, Scalar {
Scalar {
value,
valid_range: 0..=(!0 >> (128 - bits))
}))
}
};
let scalar = |value: Primitive| {
tcx.intern_layout(CachedLayout::scalar(cx, scalar_unit(value)))
};
let scalar_pair = |a: Scalar, b: Scalar| {
let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
let size = (b_offset + b.value.size(dl)).abi_align(align);
CachedLayout {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Arbitrary {
offsets: vec![Size::from_bytes(0), b_offset],
memory_index: vec![0, 1]
},
abi: Abi::ScalarPair(a, b),
align,
primitive_align: align,
size
}
};
#[derive(Copy, Clone, Debug)]
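The new scalar_pair helper derives the pair's placement from the two scalars alone: the second field goes at the first field's size rounded up to the second's alignment, and the total size is rounded up to the pair's overall alignment. A standalone sketch of the same byte arithmetic, assuming power-of-two alignments and a hypothetical align_to helper in place of the compiler's Size/Align types:

    // Round `offset` up to a multiple of `align` (align is a power of two).
    fn align_to(offset: u64, align: u64) -> u64 {
        (offset + align - 1) & !(align - 1)
    }

    fn main() {
        // Pair of (i8, u32): sizes 1 and 4 bytes, alignments 1 and 4.
        let (a_size, a_align) = (1u64, 1u64);
        let (b_size, b_align) = (4u64, 4u64);
        let align = a_align.max(b_align);              // overall alignment: 4
        let b_offset = align_to(a_size, b_align);      // second field at byte 4
        let size = align_to(b_offset + b_size, align); // total size: 8
        assert_eq!((align, b_offset, size), (4, 4, 8));
    }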
@@ -1049,19 +1075,54 @@ enum StructKind {
memory_index = inverse_memory_index;
}
let size = min_size.abi_align(align);
let mut abi = Abi::Aggregate {
sized,
packed
};
// Look for a scalar pair, as an ABI optimization.
// FIXME(eddyb) ignore extra ZST fields and field ordering.
if sized && !packed && fields.len() == 2 {
match (&fields[0].abi, &fields[1].abi) {
(&Abi::Scalar(ref a), &Abi::Scalar(ref b)) => {
let pair = scalar_pair(a.clone(), b.clone());
let pair_offsets = match pair.fields {
FieldPlacement::Arbitrary {
ref offsets,
ref memory_index
} => {
assert_eq!(memory_index, &[0, 1]);
offsets
}
_ => bug!()
};
if offsets[0] == pair_offsets[0] &&
offsets[1] == pair_offsets[1] &&
memory_index[0] == 0 &&
memory_index[1] == 1 &&
align == pair.align &&
primitive_align == pair.primitive_align &&
size == pair.size {
// We can use `ScalarPair` only when it matches our
// already computed layout (including `#[repr(C)]`).
abi = pair.abi;
}
}
_ => {}
}
}
Ok(CachedLayout {
variants: Variants::Single { index: 0 },
fields: FieldPlacement::Arbitrary {
offsets,
memory_index
},
abi: Abi::Aggregate {
sized,
packed
},
abi,
align,
primitive_align,
size: min_size.abi_align(align)
size
})
};
let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
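The block above applies ScalarPair only as an optimization on top of the normal univariant layout: it recomputes the canonical pair placement and adopts it solely when the offsets, ordering, alignment, and size all agree with what was already computed, so #[repr(C)], packing, and field reordering remain authoritative. A sketch of that eligibility check, with plain integers standing in for Size:

    // A two-field struct may use ScalarPair only if its computed offsets
    // and ordering match the canonical (a, b) pair placement.
    fn scalar_pair_matches(
        offsets: [u64; 2],
        memory_index: [u32; 2],
        pair_offsets: [u64; 2],
    ) -> bool {
        offsets == pair_offsets && memory_index == [0, 1]
    }

    fn main() {
        // (u8, u32): fields at offsets 0 and 4, in declaration order: eligible.
        assert!(scalar_pair_matches([0, 4], [0, 1], [0, 4]));
        // A layout that placed the second field at offset 1 would not match.
        assert!(!scalar_pair_matches([0, 1], [0, 1], [0, 4]));
    }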
@@ -1070,45 +1131,34 @@ enum StructKind {
assert!(!ty.has_infer_types());
let ptr_layout = |pointee: Ty<'tcx>| {
let mut data_ptr = scalar_unit(Pointer);
if !ty.is_unsafe_ptr() {
data_ptr.valid_range.start = 1;
}
let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
if pointee.is_sized(tcx, param_env, DUMMY_SP) {
let non_zero = !ty.is_unsafe_ptr();
let bits = Pointer.size(dl).bits();
return Ok(tcx.intern_layout(CachedLayout::scalar(cx, Scalar {
value: Pointer,
valid_range: (non_zero as u128)..=(!0 >> (128 - bits))
})));
return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr)));
}
let unsized_part = tcx.struct_tail(pointee);
let metadata = match unsized_part.sty {
ty::TyForeign(..) => return Ok(scalar(Pointer)),
ty::TyForeign(..) => {
return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr)));
}
ty::TySlice(_) | ty::TyStr => {
Int(dl.ptr_sized_integer(), false)
scalar_unit(Int(dl.ptr_sized_integer(), false))
}
ty::TyDynamic(..) => {
let mut vtable = scalar_unit(Pointer);
vtable.valid_range.start = 1;
vtable
}
ty::TyDynamic(..) => Pointer,
_ => return Err(LayoutError::Unknown(unsized_part))
};
// Effectively a (ptr, meta) tuple.
let align = Pointer.align(dl).max(metadata.align(dl));
let meta_offset = Pointer.size(dl);
assert_eq!(meta_offset, meta_offset.abi_align(metadata.align(dl)));
let fields = FieldPlacement::Arbitrary {
offsets: vec![Size::from_bytes(0), meta_offset],
memory_index: vec![0, 1]
};
Ok(tcx.intern_layout(CachedLayout {
variants: Variants::Single { index: 0 },
fields,
abi: Abi::Aggregate {
sized: true,
packed: false
},
align,
primitive_align: align,
size: (meta_offset + metadata.size(dl)).abi_align(align)
}))
Ok(tcx.intern_layout(scalar_pair(data_ptr, metadata)))
};
Ok(match ty.sty {
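ptr_layout now builds every fat pointer as a scalar pair of (data pointer, metadata): a usize length for slices and strings, and a non-null vtable pointer for trait objects, with the data pointer itself marked non-null for safe references. The resulting layout is observable from plain Rust:

    use std::mem::{align_of, size_of};

    fn main() {
        // A slice reference is a (data pointer, length) pair.
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
        assert_eq!(align_of::<&[u8]>(), align_of::<usize>());
        // A reference to a sized type stays a single thin pointer.
        assert_eq!(size_of::<&u8>(), size_of::<usize>());
    }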
@@ -1134,11 +1184,9 @@ enum StructKind {
ty::TyFloat(FloatTy::F32) => scalar(F32),
ty::TyFloat(FloatTy::F64) => scalar(F64),
ty::TyFnPtr(_) => {
let bits = Pointer.size(dl).bits();
tcx.intern_layout(CachedLayout::scalar(cx, Scalar {
value: Pointer,
valid_range: 1..=(!0 >> (128 - bits))
}))
let mut ptr = scalar_unit(Pointer);
ptr.valid_range.start = 1;
tcx.intern_layout(CachedLayout::scalar(cx, ptr))
}
// The never type.
@@ -2194,7 +2242,7 @@ pub fn is_packed(&self) -> bool {
pub fn is_zst(&self) -> bool {
match self.abi {
Abi::Uninhabited => true,
Abi::Scalar(_) => false,
Abi::Scalar(_) | Abi::ScalarPair(..) => false,
Abi::Vector => self.size.bytes() == 0,
Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0
}
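As the match above encodes, a ScalarPair, like a single Scalar, can never be a zero-sized type, because every scalar primitive occupies at least one byte; only Vector and sized Aggregate layouts can have zero size. For example:

    use std::mem::size_of;

    fn main() {
        // Scalar pairs always occupy memory, e.g. a fat pointer:
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
        // while an empty aggregate is a genuine ZST:
        assert_eq!(size_of::<()>(), 0);
    }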
@@ -2347,6 +2395,10 @@ fn hash_stable<W: StableHasherResult>(&self,
Scalar(ref value) => {
value.hash_stable(hcx, hasher);
}
ScalarPair(ref a, ref b) => {
a.hash_stable(hcx, hasher);
b.hash_stable(hcx, hasher);
}
Vector => {}
Aggregate { packed, sized } => {
packed.hash_stable(hcx, hasher);
@@ -311,6 +311,7 @@ fn is_aggregate(&self) -> bool {
layout::Abi::Uninhabited |
layout::Abi::Scalar(_) |
layout::Abi::Vector => false,
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => true
}
}
@@ -340,6 +341,7 @@ fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>
})
}
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => {
let mut total = Size::from_bytes(0);
let mut result = None;
@@ -745,10 +747,13 @@ pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>,
arg.attrs.set(ArgAttribute::NonNull);
}
}
_ => {}
_ => {
// Nothing to do for non-pointer types.
return;
}
}
if let Some(pointee) = arg.layout.pointee_info(ccx) {
if let Some(pointee) = arg.layout.pointee_info_at(ccx, Size::from_bytes(0)) {
if let Some(kind) = pointee.safe {
arg.attrs.pointee_size = pointee.size;
arg.attrs.pointee_align = Some(pointee.align);
@@ -88,6 +88,7 @@ fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
}
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => {
match layout.variants {
layout::Variants::Single { .. } => {
@@ -18,6 +18,7 @@ pub fn compute_abi_info(fty: &mut FnType) {
let fixup = |a: &mut ArgType| {
match a.layout.abi {
layout::Abi::Uninhabited => {}
layout::Abi::ScalarPair(..) |
layout::Abi::Aggregate { .. } => {
match a.layout.size.bits() {
8 => a.cast_to(Reg::i8()),
@@ -232,16 +232,9 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
}
pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
let empty = C_array(Type::i8(cx), &[]);
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
C_struct(cx, &[
empty,
ptr,
empty,
meta,
empty
], false)
C_struct(cx, &[ptr, meta], false)
}
pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
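C_fat_ptr previously had to pad the constant with empty [0 x i8] arrays so that the pointer and metadata landed at the LLVM field indices the old aggregate layout dictated (the asserts pinned FAT_PTR_ADDR = 0 to struct index 1 and FAT_PTR_EXTRA = 1 to index 3); with ScalarPair the constant is simply the two meaningful fields. The codegen tests further down show the effect: the LLVM type of a &[u8] fat pointer shrinks from { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } to { [0 x i8]*, [[USIZE]] }.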
@@ -31,7 +31,7 @@
use rustc_data_structures::stable_hasher::StableHashingContextProvider;
use rustc::session::config::{self, NoDebugInfo};
use rustc::session::Session;
use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout};
use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::util::nodemap::FxHashMap;
use rustc_trans_utils;
@@ -103,7 +103,7 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> {
lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
pointee_infos: RefCell<FxHashMap<Ty<'tcx>, Option<PointeeInfo>>>,
pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
isize_ty: Type,
dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
@@ -516,7 +516,8 @@ pub fn scalar_lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
&self.local().scalar_lltypes
}
pub fn pointee_infos<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Option<PointeeInfo>>> {
pub fn pointee_infos<'a>(&'a self)
-> &'a RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>> {
&self.local().pointee_infos
}
@@ -35,7 +35,7 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
if layout.is_llvm_immediate() {
// These sorts of types are immediates that we can store
// in an ValueRef without an alloca.
} else if layout.is_llvm_scalar_pair(mircx.ccx) {
} else if layout.is_llvm_scalar_pair() {
// We allow pairs and uses of any of their 2 fields.
} else {
// These sorts of types require an alloca. Note that
@@ -146,7 +146,7 @@ fn visit_lvalue(&mut self,
let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));
let layout = self.cx.ccx.layout_of(ty);
if layout.is_llvm_scalar_pair(self.cx.ccx) {
if layout.is_llvm_scalar_pair() {
return;
}
}
@@ -117,7 +117,12 @@ pub fn from_constval(ccx: &CrateContext<'a, 'tcx>,
}
fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef {
const_get_elt(self.llval, ccx.layout_of(self.ty).llvm_field_index(i))
let layout = ccx.layout_of(self.ty);
if let layout::Abi::ScalarPair(..) = layout.abi {
const_get_elt(self.llval, i as u64)
} else {
const_get_elt(self.llval, layout.llvm_field_index(i))
}
}
fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) {
@@ -143,7 +148,7 @@ pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
let llty = layout.immediate_llvm_type(ccx);
let llvalty = val_ty(self.llval);
let val = if llty == llvalty && layout.is_llvm_scalar_pair(ccx) {
let val = if llty == llvalty && layout.is_llvm_scalar_pair() {
let (a, b) = self.get_pair(ccx);
OperandValue::Pair(a, b)
} else if llty == llvalty && layout.is_llvm_immediate() {
@@ -1174,6 +1179,14 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-> Const<'tcx> {
assert_eq!(vals.len(), layout.fields.count());
if let layout::Abi::ScalarPair(..) = layout.abi {
assert_eq!(vals.len(), 2);
return Const::new(C_struct(ccx, &[
vals[0].llval,
vals[1].llval,
], false), layout.ty);
}
// offset of current value
let mut offset = Size::from_bytes(0);
let mut cfields = Vec::new();
@@ -175,10 +175,13 @@ pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
load
};
OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
} else if self.layout.is_llvm_scalar_pair(bcx.ccx) {
OperandValue::Pair(
self.project_field(bcx, 0).load(bcx).immediate(),
self.project_field(bcx, 1).load(bcx).immediate())
} else if self.layout.is_llvm_scalar_pair() {
let load = |i| {
let x = self.project_field(bcx, i).load(bcx).immediate();
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
bcx.bitcast(x, self.layout.scalar_pair_element_llvm_type(bcx.ccx, i))
};
OperandValue::Pair(load(0), load(1))
} else {
OperandValue::Ref(self.llval, self.alignment)
};
@@ -190,17 +193,23 @@ pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> {
let ccx = bcx.ccx;
let field = self.layout.field(ccx, ix);
let offset = self.layout.fields.offset(ix).bytes();
let offset = self.layout.fields.offset(ix);
let alignment = self.alignment | Alignment::from(self.layout);
let simple = || {
// Unions and newtypes only use an offset of 0.
let llval = if offset.bytes() == 0 {
self.llval
} else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
// Offsets have to match either first or second field.
assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx)));
bcx.struct_gep(self.llval, 1)
} else {
bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
};
LvalueRef {
// Unions and newtypes only use an offset of 0.
llval: if offset == 0 {
bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to())
} else {
bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
},
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()),
llextra: if ccx.shared().type_has_metadata(field.ty) {
self.llextra
} else {
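In the new project_field path above, a ScalarPair's LLVM struct has exactly two elements, so a field is located purely by its byte offset: offset 0 is element 0, and the only other valid offset is the first scalar's size rounded up to the second's alignment (element 1), which the assert enforces. A sketch of that mapping, assuming power-of-two alignments:

    // Map a field's byte offset to its LLVM element index in a scalar
    // pair; mirrors the assert in `project_field` above.
    fn pair_element_index(offset: u64, a_size: u64, b_align: u64) -> u32 {
        let b_offset = (a_size + b_align - 1) & !(b_align - 1);
        if offset == 0 { 0 }
        else if offset == b_offset { 1 }
        else { panic!("offset {} is not a scalar pair element", offset) }
    }

    fn main() {
        // (u8, u32): elements at offsets 0 and 4.
        assert_eq!(pair_element_index(0, 1, 4), 0);
        assert_eq!(pair_element_index(4, 1, 4), 1);
    }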
@@ -249,7 +258,7 @@ pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx
let meta = self.llextra;
let unaligned_offset = C_usize(ccx, offset);
let unaligned_offset = C_usize(ccx, offset.bytes());
// Get the alignment of the field
let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
@@ -123,11 +123,8 @@ pub fn immediate_or_packed_pair(self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
self, llty);
// Reconstruct the immediate aggregate.
let mut llpair = C_undef(llty);
let elems = [a, b];
for i in 0..2 {
let elem = base::from_immediate(bcx, elems[i]);
llpair = bcx.insert_value(llpair, elem, self.layout.llvm_field_index(i));
}
llpair = bcx.insert_value(llpair, a, 0);
llpair = bcx.insert_value(llpair, b, 1);
llpair
} else {
self.immediate()
@@ -139,18 +136,13 @@ pub fn from_immediate_or_packed_pair(bcx: &Builder<'a, 'tcx>,
llval: ValueRef,
layout: TyLayout<'tcx>)
-> OperandRef<'tcx> {
let val = if layout.is_llvm_scalar_pair(bcx.ccx) {
let val = if layout.is_llvm_scalar_pair() {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
llval, layout);
// Deconstruct the immediate aggregate.
let a = bcx.extract_value(llval, layout.llvm_field_index(0));
let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0));
let b = bcx.extract_value(llval, layout.llvm_field_index(1));
let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1));
OperandValue::Pair(a, b)
OperandValue::Pair(bcx.extract_value(llval, 0),
bcx.extract_value(llval, 1))
} else {
OperandValue::Immediate(llval)
};
@@ -175,8 +167,11 @@ pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: LvalueRef<'tcx>) {
}
OperandValue::Pair(a, b) => {
for (i, &x) in [a, b].iter().enumerate() {
OperandValue::Immediate(x)
.store(bcx, dest.project_field(bcx, i));
let field = dest.project_field(bcx, i);
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
let x = bcx.bitcast(x, field.layout.immediate_llvm_type(bcx.ccx));
bcx.store(base::from_immediate(bcx, x),
field.llval, field.alignment.non_abi());
}
}
}
@@ -214,10 +209,15 @@ pub fn trans_consume(&mut self,
match (o.val, &proj.elem) {
(OperandValue::Pair(a, b),
&mir::ProjectionElem::Field(ref f, ty)) => {
let layout = bcx.ccx.layout_of(self.monomorphize(&ty));
let llval = [a, b][f.index()];
// HACK(eddyb) have to bitcast pointers
// until LLVM removes pointee types.
let llval = bcx.bitcast(llval,
layout.immediate_llvm_type(bcx.ccx));
return OperandRef {
val: OperandValue::Immediate(llval),
layout: bcx.ccx.layout_of(self.monomorphize(&ty))
layout
};
}
_ => {}
@@ -29,6 +29,12 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
return Type::vector(&layout.field(ccx, 0).llvm_type(ccx),
layout.fields.count() as u64);
}
layout::Abi::ScalarPair(..) => {
return Type::struct_(ccx, &[
layout.scalar_pair_element_llvm_type(ccx, 0),
layout.scalar_pair_element_llvm_type(ccx, 1),
], false);
}
layout::Abi::Uninhabited |
layout::Abi::Aggregate { .. } => {}
}
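So a ScalarPair lowers to an anonymous, unpacked two-element LLVM struct built from the element types, never from the source-level field list. A toy sketch of the resulting type, with the element type strings for a &[u8] fat pointer taken from the codegen tests below:

    // Toy illustration: format the two-element struct type a scalar
    // pair lowers to (the element type strings are stand-ins).
    fn scalar_pair_llvm_type(elem0: &str, elem1: &str) -> String {
        format!("{{ {}, {} }}", elem0, elem1)
    }

    fn main() {
        // e.g. a &[u8] fat pointer on a 64-bit target:
        assert_eq!(scalar_pair_llvm_type("[0 x i8]*", "i64"),
                   "{ [0 x i8]*, i64 }");
    }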
@@ -174,12 +180,15 @@ pub struct PointeeInfo {
pub trait LayoutLlvmExt<'tcx> {
fn is_llvm_immediate(&self) -> bool;
fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool;
fn is_llvm_scalar_pair<'a>(&self) -> bool;
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
index: usize) -> Type;
fn over_align(&self) -> Option<Align>;
fn llvm_field_index(&self, index: usize) -> u64;
fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>;
fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
-> Option<PointeeInfo>;
}
impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
@@ -188,26 +197,18 @@ fn is_llvm_immediate(&self) -> bool {
layout::Abi::Uninhabited |
layout::Abi::Scalar(_) |
layout::Abi::Vector => true,
layout::Abi::ScalarPair(..) => false,
layout::Abi::Aggregate { .. } => self.is_zst()
}
}
fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool {
match self.fields {
layout::FieldPlacement::Arbitrary { .. } => {
// There must be only 2 fields.
if self.fields.count() != 2 {
return false;
}
// The two fields must be both scalars.
match (&self.field(ccx, 0).abi, &self.field(ccx, 1).abi) {
(&layout::Abi::Scalar(_), &layout::Abi::Scalar(_)) => true,
_ => false
}
}
_ => false
fn is_llvm_scalar_pair<'a>(&self) -> bool {
match self.abi {
layout::Abi::ScalarPair(..) => true,
layout::Abi::Uninhabited |
layout::Abi::Scalar(_) |
layout::Abi::Vector |
layout::Abi::Aggregate { .. } => false
}
}
@@ -248,7 +249,7 @@ fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
}
_ => {
// If we know the alignment, pick something better than i8.
if let Some(pointee) = self.pointee_info(ccx) {
if let Some(pointee) = self.pointee_info_at(ccx, Size::from_bytes(0)) {
Type::pointee_for_abi_align(ccx, pointee.align)
} else {
Type::i8(ccx)
@@ -310,6 +311,59 @@ fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
self.llvm_type(ccx)
}
fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
index: usize) -> Type {
// HACK(eddyb) special-case fat pointers until LLVM removes
// pointee types, to avoid bitcasting every `OperandRef::deref`.
match self.ty.sty {
ty::TyRef(..) |
ty::TyRawPtr(_) => {
return self.field(ccx, index).llvm_type(ccx);
}
ty::TyAdt(def, _) if def.is_box() => {
return self.field(ccx, index).llvm_type(ccx);
}
_ => {}
}
let (a, b) = match self.abi {
layout::Abi::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self)
};
let scalar = [a, b][index];
// Make sure to return the same type `immediate_llvm_type` would,
// to avoid dealing with two types and the associated conversions.
// This means that `(bool, bool)` is represented as `{i1, i1}`,
// both in memory and as an immediate, while `bool` is typically
// `i8` in memory and only `i1` when immediate. While we need to
// load/store `bool` as `i8` to avoid crippling LLVM optimizations,
// `i1` in a LLVM aggregate is valid and mostly equivalent to `i8`.
if scalar.is_bool() {
return Type::i1(ccx);
}
match scalar.value {
layout::Int(i, _) => Type::from_integer(ccx, i),
layout::F32 => Type::f32(ccx),
layout::F64 => Type::f64(ccx),
layout::Pointer => {
// If we know the alignment, pick something better than i8.
let offset = if index == 0 {
Size::from_bytes(0)
} else {
a.value.size(ccx).abi_align(b.value.align(ccx))
};
let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) {
Type::pointee_for_abi_align(ccx, pointee.align)
} else {
Type::i8(ccx)
};
pointee.ptr_to()
}
}
}
fn over_align(&self) -> Option<Align> {
if self.align != self.primitive_align {
Some(self.align)
@@ -319,8 +373,12 @@ fn over_align(&self) -> Option<Align> {
}
fn llvm_field_index(&self, index: usize) -> u64 {
if let layout::Abi::Scalar(_) = self.abi {
bug!("TyLayout::llvm_field_index({:?}): not applicable", self);
match self.abi {
layout::Abi::Scalar(_) |
layout::Abi::ScalarPair(..) => {
bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
}
_ => {}
}
match self.fields {
layout::FieldPlacement::Union(_) => {
@@ -337,20 +395,15 @@ fn llvm_field_index(&self, index: usize) -> u64 {
}
}
fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo> {
// We only handle thin pointers here.
match self.abi {
layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {}
_ => return None
}
if let Some(&pointee) = ccx.pointee_infos().borrow().get(&self.ty) {
fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
-> Option<PointeeInfo> {
if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) {
return pointee;
}
let mut result = None;
match self.ty.sty {
ty::TyRawPtr(mt) => {
ty::TyRawPtr(mt) if offset.bytes() == 0 => {
let (size, align) = ccx.size_and_align_of(mt.ty);
result = Some(PointeeInfo {
size,
......@@ -359,7 +412,7 @@ fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>
});
}
ty::TyRef(_, mt) => {
ty::TyRef(_, mt) if offset.bytes() == 0 => {
let (size, align) = ccx.size_and_align_of(mt.ty);
let kind = match mt.mutbl {
@@ -385,7 +438,7 @@ fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>
});
}
ty::TyAdt(def, _) if def.is_box() => {
ty::TyAdt(def, _) if def.is_box() && offset.bytes() == 0 => {
let (size, align) = ccx.size_and_align_of(self.ty.boxed_ty());
result = Some(PointeeInfo {
size,
@@ -408,7 +461,7 @@ fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>
// to work as long as we don't start using more
// niches than just null (e.g. the first page
// of the address space, or unaligned pointers).
if self.fields.offset(0).bytes() == 0 {
if self.fields.offset(0) == offset {
Some(self.for_variant(ccx, dataful_variant))
} else {
None
@@ -425,12 +478,16 @@ fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>
}
if let Some(variant) = data_variant {
let ptr_end = offset + layout::Pointer.size(ccx);
for i in 0..variant.fields.count() {
let field = variant.field(ccx, i);
if field.size == self.size {
// We found the pointer field, use its information.
result = field.pointee_info(ccx);
break;
let field_start = variant.fields.offset(i);
if field_start <= offset {
let field = variant.field(ccx, i);
if ptr_end <= field_start + field.size {
// We found the right field, look inside it.
result = field.pointee_info_at(ccx, offset - field_start);
break;
}
}
}
}
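The loop above generalizes the old size-equality heuristic: instead of requiring the pointer field to span the whole layout, it finds any field whose extent covers the queried pointer bytes and recurses with the offset rebased into that field. The containment test it performs, as a standalone sketch:

    // A field "contains" the queried pointer if the byte range
    // [offset, offset + ptr_size) falls inside the field's extent.
    fn field_contains_ptr(field_start: u64, field_size: u64,
                          offset: u64, ptr_size: u64) -> bool {
        field_start <= offset && offset + ptr_size <= field_start + field_size
    }

    fn main() {
        // Fields of a pair of pointers on a 64-bit target: offsets 0 and 8.
        assert!(field_contains_ptr(0, 8, 0, 8));  // first pointer
        assert!(field_contains_ptr(8, 8, 8, 8));  // second pointer
        assert!(!field_contains_ptr(0, 8, 8, 8)); // offset 8 is past field 0
    }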
@@ -447,7 +504,7 @@ fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>
}
}
ccx.pointee_infos().borrow_mut().insert(self.ty, result);
ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result);
result
}
}
@@ -24,9 +24,9 @@ pub fn helper(_: usize) {
pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
// We used to generate an extra alloca and memcpy for the block's trailing expression value, so
// check that we copy directly to the return value slot
// CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1
// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3
// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1
// CHECK: %0 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.0, 0
// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } %0, [[USIZE]] %x.1, 1
// CHECK: ret { [0 x i8]*, [[USIZE]] } %1
{ x }
}
@@ -133,7 +133,7 @@ pub fn trait_borrow(_: &Drop) {
pub fn trait_box(_: Box<Drop>) {
}
// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
#[no_mangle]
pub fn return_slice(x: &[u16]) -> &[u16] {
x
@@ -54,9 +54,6 @@ pub fn call_pkd(f: fn() -> Array) -> BigPacked {
// CHECK-LABEL: @pkd_pair
#[no_mangle]
pub fn pkd_pair(pair1: &mut PackedPair, pair2: &mut PackedPair) {
// CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* %{{.*}}, align 1
// CHECK: [[V2:%[a-z0-9]+]] = load i32, i32* %{{.*}}, align 1
// CHECK: store i8 [[V1]], i8* {{.*}}, align 1
// CHECK: store i32 [[V2]], i32* {{.*}}, align 1
// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 5, i32 1, i1 false)
*pair2 = *pair1;
}
......@@ -24,9 +24,9 @@ pub fn helper(_: usize) {
pub fn ref_dst(s: &[u8]) {
// We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
// directly to the alloca for "x"
// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]**
// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8]*, [[USIZE]] }* %x to [0 x i8]**
// CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]]
// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3
// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1
// CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]]
let x = &*s;