Commit 09825de6 authored by Ariel Ben-Yehuda

emit "align 1" metadata on loads/stores of packed structs

According to the LLVM reference:
> A value of 0 or an omitted align argument means that the operation has
> the ABI alignment for the target.

So loads/stores of fields of packed structs need to have their align set
to 1. Implement that by tracking the alignment of `LvalueRef`s.

Fixes #39376.
Parent 4379e2fa
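For context on #39376: per the LLVM rule quoted above, a load or store without explicit `align` metadata is assumed to have the ABI alignment of its type, but a field of a `#[repr(packed)]` struct can live at any byte offset. A minimal sketch of the affected pattern (same shape as the codegen test added at the end of this commit):

```rust
// `data` sits at offset 1, below u32's ABI alignment of 4, so any
// load/store of it must be emitted with `align 1` metadata.
#[repr(packed)]
pub struct Packed {
    dealign: u8,
    data: u32,
}

pub fn read_data(p: &Packed) -> u32 {
    // The field read copies the value out; that copy's load is what
    // this commit annotates with `align 1`.
    p.data
}

fn main() {
    let p = Packed { dealign: 0, data: 42 };
    assert_eq!(read_data(&p), 42);
}
```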
......@@ -56,6 +56,8 @@
use type_::Type;
use type_of;
use mir::lvalue::Alignment;
/// Given an enum, struct, closure, or tuple, extracts fields.
/// Treats closures as a struct with one variant.
/// `empty_if_no_variants` is a switch to deal with empty enums.
......@@ -279,6 +281,7 @@ pub fn trans_get_discr<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
t: Ty<'tcx>,
scrutinee: ValueRef,
alignment: Alignment,
cast_to: Option<Type>,
range_assert: bool
) -> ValueRef {
......@@ -292,11 +295,12 @@ pub fn trans_get_discr<'a, 'tcx>(
let val = match *l {
layout::CEnum { discr, min, max, .. } => {
load_discr(bcx, discr, scrutinee, min, max, range_assert)
load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert)
}
layout::General { discr, .. } => {
let ptr = bcx.struct_gep(scrutinee, 0);
load_discr(bcx, discr, ptr, 0, def.variants.len() as u64 - 1,
load_discr(bcx, discr, ptr, alignment,
0, def.variants.len() as u64 - 1,
range_assert)
}
layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
......@@ -305,10 +309,10 @@ pub fn trans_get_discr<'a, 'tcx>(
let llptrty = type_of::sizing_type_of(bcx.ccx,
monomorphize::field_ty(bcx.tcx(), substs,
&def.variants[nndiscr as usize].fields[0]));
bcx.icmp(cmp, bcx.load(scrutinee), C_null(llptrty))
bcx.icmp(cmp, bcx.load(scrutinee, alignment.to_align()), C_null(llptrty))
}
layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee)
struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment)
},
_ => bug!("{} is not an enum", t)
};
......@@ -322,17 +326,19 @@ fn struct_wrapped_nullable_bitdiscr(
bcx: &Builder,
nndiscr: u64,
discrfield: &layout::FieldPath,
scrutinee: ValueRef
scrutinee: ValueRef,
alignment: Alignment,
) -> ValueRef {
let llptrptr = bcx.gepi(scrutinee,
&discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>()[..]);
let llptr = bcx.load(llptrptr);
let llptr = bcx.load(llptrptr, alignment.to_align());
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
}
/// Helper for cases where the discriminant is simply loaded.
fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max: u64,
fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
alignment: Alignment, min: u64, max: u64,
range_assert: bool)
-> ValueRef {
let llty = Type::from_integer(bcx.ccx, ity);
......@@ -348,11 +354,12 @@ fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef, min: u64, max:
// rejected by the LLVM verifier (it would mean either an
// empty set, which is impossible, or the entire range of the
// type, which is pointless).
bcx.load(ptr)
bcx.load(ptr, alignment.to_align())
} else {
// llvm::ConstantRange can deal with ranges that wrap around,
// so an overflow on (max + 1) is fine.
bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True)
bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True,
alignment.to_align())
}
}
......
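A note on the `trans_get_discr` threading above: an enum's discriminant is itself a field in memory, so if the enum value lives inside a packed struct, the discriminant load must also be downgraded to `align 1`. A hedged sketch with hypothetical types (not part of this commit):

```rust
// The enum's tag is stored at offset 1 inside `Holder`, so reading it
// through `match` involves a potentially misaligned discriminant load.
#[derive(Copy, Clone)]
enum Tag {
    A(u32),
    B(u32),
}

#[repr(packed)]
struct Holder {
    pad: u8,
    tag: Tag,
}

fn which(h: &Holder) -> u32 {
    // `h.tag` copies the enum out; the generated discriminant load now
    // carries the alignment tracked by the surrounding LvalueRef.
    match h.tag {
        Tag::A(x) => x,
        Tag::B(x) => x,
    }
}

fn main() {
    let h = Holder { pad: 0, tag: Tag::B(7) };
    assert_eq!(which(&h), 7);
}
```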
......@@ -20,6 +20,8 @@
use rustc::hir;
use rustc::ty::Ty;
use mir::lvalue::Alignment;
use std::ffi::CString;
use syntax::ast::AsmDialect;
use libc::{c_uint, c_char};
......@@ -38,7 +40,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
let mut indirect_outputs = vec![];
for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
let val = if out.is_rw || out.is_indirect {
Some(base::load_ty(bcx, val, ty))
Some(base::load_ty(bcx, val, Alignment::Packed, ty))
} else {
None
};
......
......@@ -90,6 +90,8 @@
use rustc::ty::layout::{self, Layout};
use syntax::ast;
use mir::lvalue::Alignment;
pub struct StatRecorder<'a, 'tcx: 'a> {
ccx: &'a CrateContext<'a, 'tcx>,
name: Option<String>,
......@@ -250,25 +252,25 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
src: ValueRef,
src_ty: Ty<'tcx>,
dst: ValueRef,
dst_ty: Ty<'tcx>) {
src: &LvalueRef<'tcx>,
dst: &LvalueRef<'tcx>) {
let src_ty = src.ty.to_ty(bcx.tcx());
let dst_ty = dst.ty.to_ty(bcx.tcx());
let coerce_ptr = || {
let (base, info) = if common::type_is_fat_ptr(bcx.ccx, src_ty) {
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// the types match up.
let (base, info) = load_fat_ptr(bcx, src, src_ty);
let (base, info) = load_fat_ptr(bcx, src.llval, src.alignment, src_ty);
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty);
let base = bcx.pointercast(base, llcast_ty);
(base, info)
} else {
let base = load_ty(bcx, src, src_ty);
let base = load_ty(bcx, src.llval, src.alignment, src_ty);
unsize_thin_ptr(bcx, base, src_ty, dst_ty)
};
store_fat_ptr(bcx, base, info, dst, dst_ty);
store_fat_ptr(bcx, base, info, dst.llval, dst.alignment, dst_ty);
};
match (&src_ty.sty, &dst_ty.sty) {
(&ty::TyRef(..), &ty::TyRef(..)) |
......@@ -290,21 +292,22 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
monomorphize::field_ty(bcx.tcx(), substs_b, f)
});
let src = LvalueRef::new_sized_ty(src, src_ty);
let dst = LvalueRef::new_sized_ty(dst, dst_ty);
let iter = src_fields.zip(dst_fields).enumerate();
for (i, (src_fty, dst_fty)) in iter {
if type_is_zero_size(bcx.ccx, dst_fty) {
continue;
}
let src_f = src.trans_field_ptr(bcx, i);
let dst_f = dst.trans_field_ptr(bcx, i);
let (src_f, src_f_align) = src.trans_field_ptr(bcx, i);
let (dst_f, dst_f_align) = dst.trans_field_ptr(bcx, i);
if src_fty == dst_fty {
memcpy_ty(bcx, dst_f, src_f, src_fty, None);
} else {
coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
coerce_unsized_into(
bcx,
&LvalueRef::new_sized_ty(src_f, src_fty, src_f_align),
&LvalueRef::new_sized_ty(dst_f, dst_fty, dst_f_align)
);
}
}
}
......@@ -399,7 +402,8 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef,
alignment: Alignment, t: Ty<'tcx>) -> ValueRef {
let ccx = b.ccx;
if type_is_zero_size(ccx, t) {
return C_undef(type_of::type_of(ccx, t));
......@@ -419,29 +423,31 @@ pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> V
}
if t.is_bool() {
b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False), Type::i1(ccx))
b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False, alignment.to_align()),
Type::i1(ccx))
} else if t.is_char() {
// a char is a Unicode codepoint, and so takes values from 0
// to 0x10FFFF inclusive only.
b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False)
b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align())
} else if (t.is_region_ptr() || t.is_box()) && !common::type_is_fat_ptr(ccx, t) {
b.load_nonnull(ptr)
b.load_nonnull(ptr, alignment.to_align())
} else {
b.load(ptr)
b.load(ptr, alignment.to_align())
}
}
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef,
dst_align: Alignment, t: Ty<'tcx>) {
debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
if common::type_is_fat_ptr(cx.ccx, t) {
let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR);
let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
store_fat_ptr(cx, lladdr, llextra, dst, t);
store_fat_ptr(cx, lladdr, llextra, dst, dst_align, t);
} else {
cx.store(from_immediate(cx, v), dst, None);
cx.store(from_immediate(cx, v), dst, dst_align.to_align());
}
}
......@@ -449,24 +455,25 @@ pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
data: ValueRef,
extra: ValueRef,
dst: ValueRef,
dst_align: Alignment,
_ty: Ty<'tcx>) {
// FIXME: emit metadata
cx.store(data, get_dataptr(cx, dst), None);
cx.store(extra, get_meta(cx, dst), None);
cx.store(data, get_dataptr(cx, dst), dst_align.to_align());
cx.store(extra, get_meta(cx, dst), dst_align.to_align());
}
pub fn load_fat_ptr<'a, 'tcx>(
b: &Builder<'a, 'tcx>, src: ValueRef, t: Ty<'tcx>
b: &Builder<'a, 'tcx>, src: ValueRef, alignment: Alignment, t: Ty<'tcx>
) -> (ValueRef, ValueRef) {
let ptr = get_dataptr(b, src);
let ptr = if t.is_region_ptr() || t.is_box() {
b.load_nonnull(ptr)
b.load_nonnull(ptr, alignment.to_align())
} else {
b.load(ptr)
b.load(ptr, alignment.to_align())
};
// FIXME: emit metadata on `meta`.
let meta = b.load(get_meta(b, src));
let meta = b.load(get_meta(b, src), alignment.to_align());
(ptr, meta)
}
......@@ -633,7 +640,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
bcx.alloca(fn_ty.ret.memory_ty(ccx), "sret_slot")
};
// Can return unsized value
let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output());
let mut dest_val = LvalueRef::new_sized_ty(dest, sig.output(), Alignment::AbiAligned);
dest_val.ty = LvalueTy::Downcast {
adt_def: sig.output().ty_adt_def().unwrap(),
substs: substs,
......@@ -642,7 +649,7 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
let mut arg_idx = 0;
for (i, arg_ty) in sig.inputs().iter().enumerate() {
let lldestptr = dest_val.trans_field_ptr(&bcx, i);
let (lldestptr, _) = dest_val.trans_field_ptr(&bcx, i);
let arg = &fn_ty.args[arg_idx];
arg_idx += 1;
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
......@@ -662,14 +669,12 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
if let Some(cast_ty) = fn_ty.ret.cast {
let load = bcx.load(bcx.pointercast(dest, cast_ty.ptr_to()));
let llalign = llalign_of_min(ccx, fn_ty.ret.ty);
unsafe {
llvm::LLVMSetAlignment(load, llalign);
}
bcx.ret(load)
bcx.ret(bcx.load(
bcx.pointercast(dest, cast_ty.ptr_to()),
Some(llalign_of_min(ccx, fn_ty.ret.ty))
));
} else {
bcx.ret(bcx.load(dest))
bcx.ret(bcx.load(dest, None))
}
} else {
bcx.ret_void();
......
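The `coerce_unsized_into` rework above replaces raw `ValueRef` pairs with `LvalueRef`s so the field-by-field ADT branch can propagate each field's alignment. A sketch of one coercion that reaches that branch (assuming, as the match arms suggest, that unsizing a `Box` of a struct recurses through struct fields):

```rust
// A struct whose last field can be unsized from [u8; 4] to [u8].
struct Wrapper<T: ?Sized> {
    value: T,
}

fn main() {
    let sized: Box<Wrapper<[u8; 4]>> = Box::new(Wrapper { value: [1, 2, 3, 4] });
    // Unsizing coercion: source and destination are ADTs, so the copy is
    // done per field, now with per-field (ValueRef, Alignment) pairs.
    let unsized_box: Box<Wrapper<[u8]>> = sized;
    assert_eq!(unsized_box.value.len(), 4);
}
```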
......@@ -19,9 +19,8 @@
use type_::Type;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{Ty, TyCtxt, TypeFoldable};
use rustc::ty::TyCtxt;
use rustc::session::Session;
use type_of;
use std::borrow::Cow;
use std::ffi::CString;
......@@ -486,11 +485,6 @@ pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
builder.dynamic_alloca(ty, name)
}
pub fn alloca_ty(&self, ty: Ty<'tcx>, name: &str) -> ValueRef {
assert!(!ty.has_param_types());
self.alloca(type_of::type_of(self.ccx, ty), name)
}
pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
self.count_insn("alloca");
unsafe {
......@@ -511,10 +505,14 @@ pub fn free(&self, ptr: ValueRef) {
}
}
pub fn load(&self, ptr: ValueRef) -> ValueRef {
pub fn load(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef {
self.count_insn("load");
unsafe {
llvm::LLVMBuildLoad(self.llbuilder, ptr, noname())
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
if let Some(align) = align {
llvm::LLVMSetAlignment(load, align as c_uint);
}
load
}
}
......@@ -539,8 +537,9 @@ pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef {
pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
hi: u64, signed: llvm::Bool) -> ValueRef {
let value = self.load(ptr);
hi: u64, signed: llvm::Bool,
align: Option<u32>) -> ValueRef {
let value = self.load(ptr, align);
unsafe {
let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr));
......@@ -558,8 +557,8 @@ pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
value
}
pub fn load_nonnull(&self, ptr: ValueRef) -> ValueRef {
let value = self.load(ptr);
pub fn load_nonnull(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef {
let value = self.load(ptr, align);
unsafe {
llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint,
llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));
......
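The `Builder::load` family now takes the alignment as an explicit `Option<u32>` and applies it via `LLVMSetAlignment`. A standalone mock of that contract (not rustc's types), showing how `None` maps to "omit the metadata, i.e. ABI alignment":

```rust
// Render a textual LLVM-style load to show the Option<u32> contract:
// None omits the align metadata (ABI alignment), Some(1) is what
// packed-struct accesses pass via Alignment::to_align().
fn render_load(ptr: &str, align: Option<u32>) -> String {
    let mut insn = format!("%v = load i32, i32* {}", ptr);
    if let Some(a) = align {
        insn.push_str(&format!(", align {}", a));
    }
    insn
}

fn main() {
    assert_eq!(render_load("%p", None), "%v = load i32, i32* %p");
    assert_eq!(render_load("%p", Some(1)), "%v = load i32, i32* %p, align 1");
}
```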
......@@ -41,6 +41,8 @@
use syntax_pos::DUMMY_SP;
use mir::lvalue::Alignment;
#[derive(Debug)]
pub enum CalleeData {
/// Constructor for enum variant/tuple-like-struct.
......@@ -358,29 +360,27 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let self_idx = fn_ty.ret.is_indirect() as usize;
let env_arg = &orig_fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
llargs[self_idx]
let env = if env_arg.is_indirect() {
LvalueRef::new_sized_ty(llargs[self_idx], closure_ty, Alignment::AbiAligned)
} else {
let scratch = bcx.alloca_ty(closure_ty, "self");
let scratch = LvalueRef::alloca(&bcx, closure_ty, "self");
let mut llarg_idx = self_idx;
env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch);
env_arg.store_fn_arg(&bcx, &mut llarg_idx, scratch.llval);
scratch
};
debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
debug!("trans_fn_once_adapter_shim: env={:?}", env);
// Adjust llargs such that llargs[self_idx..] has the call arguments.
// For zero-sized closures that means sneaking in a new argument.
if env_arg.is_ignore() {
llargs.insert(self_idx, llenv);
llargs.insert(self_idx, env.llval);
} else {
llargs[self_idx] = llenv;
llargs[self_idx] = env.llval;
}
// Call the by-ref closure body with `self` in a cleanup scope,
// to drop `self` when the body returns, or in case it unwinds.
let self_scope = CleanupScope::schedule_drop_mem(
&bcx, LvalueRef::new_sized_ty(llenv, closure_ty)
);
let self_scope = CleanupScope::schedule_drop_mem(&bcx, env);
let llfn = callee.reify(bcx.ccx);
let llret;
......@@ -512,7 +512,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
let llfnpointer = llfnpointer.unwrap_or_else(|| {
// the first argument (`self`) will be ptr to the fn pointer
if is_by_ref {
bcx.load(self_arg)
bcx.load(self_arg, None)
} else {
self_arg
}
......
......@@ -13,7 +13,6 @@
// Code relating to drop glue.
use std;
use std::ptr;
use std::iter;
use llvm;
......@@ -41,6 +40,7 @@
use builder::Builder;
use syntax_pos::DUMMY_SP;
use mir::lvalue::Alignment;
pub fn trans_exchange_free_ty<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, ptr: LvalueRef<'tcx>) {
let content_ty = ptr.ty.to_ty(bcx.tcx());
......@@ -199,9 +199,9 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
let value = get_param(llfn, 0);
let ptr = if ccx.shared().type_is_sized(t) {
LvalueRef::new_sized_ty(value, t)
LvalueRef::new_sized_ty(value, t, Alignment::AbiAligned)
} else {
LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t)
LvalueRef::new_unsized_ty(value, get_param(llfn, 1), t, Alignment::AbiAligned)
};
let skip_dtor = match g {
......@@ -216,11 +216,13 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
assert!(!skip_dtor);
let content_ty = t.boxed_ty();
let ptr = if !bcx.ccx.shared().type_is_sized(content_ty) {
let llbox = bcx.load(get_dataptr(&bcx, ptr.llval));
let info = bcx.load(get_meta(&bcx, ptr.llval));
LvalueRef::new_unsized_ty(llbox, info, content_ty)
let llbox = bcx.load(get_dataptr(&bcx, ptr.llval), None);
let info = bcx.load(get_meta(&bcx, ptr.llval), None);
LvalueRef::new_unsized_ty(llbox, info, content_ty, Alignment::AbiAligned)
} else {
LvalueRef::new_sized_ty(bcx.load(ptr.llval), content_ty)
LvalueRef::new_sized_ty(
bcx.load(ptr.llval, None),
content_ty, Alignment::AbiAligned)
};
drop_ty(&bcx, ptr);
trans_exchange_free_ty(&bcx, ptr);
......@@ -231,7 +233,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
// versus without calling Drop::drop. Assert caller is
// okay with always calling the Drop impl, if any.
assert!(!skip_dtor);
let dtor = bcx.load(ptr.llextra);
let dtor = bcx.load(ptr.llextra, None);
bcx.call(dtor, &[ptr.llval], None);
bcx
}
......@@ -384,7 +386,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
let info = bcx.pointercast(info, Type::int(bcx.ccx).ptr_to());
let size_ptr = bcx.gepi(info, &[1]);
let align_ptr = bcx.gepi(info, &[2]);
(bcx.load(size_ptr), bcx.load(align_ptr))
(bcx.load(size_ptr, None), bcx.load(align_ptr, None))
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx());
......@@ -416,8 +418,8 @@ fn iter_variant_fields<'a, 'tcx>(
let tcx = cx.tcx();
for (i, field) in variant.fields.iter().enumerate() {
let arg = monomorphize::field_ty(tcx, substs, field);
let field_ptr = av.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg));
let (field_ptr, align) = av.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg, align));
}
}
......@@ -426,8 +428,8 @@ fn iter_variant_fields<'a, 'tcx>(
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
let llupvar = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty));
let (llupvar, align) = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty, align));
}
}
ty::TyArray(_, n) => {
......@@ -435,29 +437,29 @@ fn iter_variant_fields<'a, 'tcx>(
let len = C_uint(cx.ccx, n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, base, unit_ty, len,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::slice_for_each(&cx, ptr.llval, unit_ty, ptr.llextra,
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty)));
|bb, vv| drop_ty(bb, LvalueRef::new_sized_ty(vv, unit_ty, ptr.alignment)));
}
ty::TyTuple(ref args, _) => {
for (i, arg) in args.iter().enumerate() {
let llfld_a = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg));
let (llfld_a, align) = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg, align));
}
}
ty::TyAdt(adt, substs) => match adt.adt_kind() {
AdtKind::Struct => {
for (i, field) in adt.variants[0].fields.iter().enumerate() {
let field_ty = monomorphize::field_ty(cx.tcx(), substs, field);
let mut field_ptr = ptr.clone();
field_ptr.llval = ptr.trans_field_ptr(&cx, i);
field_ptr.ty = LvalueTy::from_ty(field_ty);
if cx.ccx.shared().type_is_sized(field_ty) {
field_ptr.llextra = ptr::null_mut();
}
let (llval, align) = ptr.trans_field_ptr(&cx, i);
let field_ptr = if cx.ccx.shared().type_is_sized(field_ty) {
LvalueRef::new_sized_ty(llval, field_ty, align)
} else {
LvalueRef::new_unsized_ty(llval, ptr.llextra, field_ty, align)
};
drop_ty(&cx, field_ptr);
}
}
......@@ -490,9 +492,12 @@ fn iter_variant_fields<'a, 'tcx>(
layout::General { .. } |
layout::RawNullablePointer { .. } |
layout::StructWrappedNullablePointer { .. } => {
let lldiscrim_a = adt::trans_get_discr(&cx, t, ptr.llval, None, false);
let lldiscrim_a = adt::trans_get_discr(
&cx, t, ptr.llval, ptr.alignment, None, false);
let tcx = cx.tcx();
drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize));
// FIXME: why are we dropping an isize?
drop_ty(&cx, LvalueRef::new_sized_ty(lldiscrim_a, tcx.types.isize,
ptr.alignment));
// Create a fall-through basic block for the "else" case of
// the switch instruction we're about to generate. Note that
......
......@@ -36,6 +36,8 @@
use std::cmp::Ordering;
use std::iter;
use mir::lvalue::Alignment;
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32",
......@@ -243,7 +245,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
} else {
let val = if fn_ty.args[1].is_indirect() {
bcx.load(llargs[1])
bcx.load(llargs[1], None) // FIXME: this is incorrect
} else {
from_immediate(bcx, llargs[1])
};
......@@ -348,7 +350,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let val_ty = substs.type_at(0);
match val_ty.sty {
ty::TyAdt(adt, ..) if adt.is_enum() => {
adt::trans_get_discr(bcx, val_ty, llargs[0],
adt::trans_get_discr(bcx, val_ty, llargs[0], Alignment::AbiAligned,
Some(llret_ty), true)
}
_ => C_null(llret_ty)
......@@ -547,8 +549,11 @@ fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// destructors, and the contents are SIMD
// etc.
assert!(!bcx.ccx.shared().type_needs_drop(arg_type));
let arg = LvalueRef::new_sized_ty(llarg, arg_type);
(0..contents.len()).map(|i| bcx.load(arg.trans_field_ptr(bcx, i))).collect()
let arg = LvalueRef::new_sized_ty(llarg, arg_type, Alignment::AbiAligned);
(0..contents.len()).map(|i| {
let (ptr, align) = arg.trans_field_ptr(bcx, i);
bcx.load(ptr, align.to_align())
}).collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false));
......@@ -624,7 +629,7 @@ fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
let ptr = bcx.pointercast(llresult, ty.ptr_to());
bcx.store(llval, ptr, Some(type_of::align_of(ccx, ret_ty)));
} else {
store_ty(bcx, llval, llresult, ret_ty);
store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty);
}
}
}
......@@ -780,10 +785,10 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
None => bug!("msvc_try_filter not defined"),
};
let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(ccx, 0), slot]);
let addr = catchpad.load(slot);
let arg1 = catchpad.load(addr);
let addr = catchpad.load(slot, None);
let arg1 = catchpad.load(addr, None);
let val1 = C_i32(ccx, 1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]));
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), None);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr, None);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);
......
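On the `discriminant_value` change above: the intrinsic's argument arrives by reference, and references are required to be aligned, which is presumably why `Alignment::AbiAligned` is used there. For illustration only (the stable wrapper over this intrinsic landed in the standard library later than this commit):

```rust
use std::mem;

enum E {
    A(u32),
    B(u32),
}

fn main() {
    // Same variant gives the same discriminant regardless of payload;
    // different variants differ. The load behind this is ABI-aligned
    // because &E is guaranteed to be aligned.
    assert_eq!(mem::discriminant(&E::A(1)), mem::discriminant(&E::A(2)));
    assert_ne!(mem::discriminant(&E::A(1)), mem::discriminant(&E::B(1)));
}
```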
......@@ -36,7 +36,7 @@ pub fn get_virtual_method<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
vtable_index, Value(llvtable));
bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]))
bcx.load(bcx.gepi(llvtable, &[vtable_index + VTABLE_OFFSET]), None)
}
/// Generate a shim function that allows an object type like `SomeTrait` to
......
......@@ -37,7 +37,7 @@
use super::{MirContext, LocalRef};
use super::analyze::CleanupKind;
use super::constant::Const;
use super::lvalue::LvalueRef;
use super::lvalue::{Alignment, LvalueRef};
use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};
......@@ -120,7 +120,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
bcx.cleanup_ret(cleanup_pad, None);
} else {
let ps = self.get_personality_slot(&bcx);
let lp = bcx.load(ps);
let lp = bcx.load(ps, None);
Lifetime::End.call(&bcx, ps);
if !bcx.sess().target.target.options.custom_unwind_resume {
bcx.resume(lp);
......@@ -147,7 +147,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
let discr_lvalue = self.trans_lvalue(&bcx, discr);
let ty = discr_lvalue.ty.to_ty(bcx.tcx());
let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, None, true);
let discr = adt::trans_get_discr(&bcx, ty, discr_lvalue.llval, discr_lvalue.alignment,
None, true);
let mut bb_hist = FxHashMap();
for target in targets {
......@@ -179,7 +180,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let lv = self.trans_lvalue(&bcx, discr);
let discr = bcx.load(lv.llval, lv.alignment.to_align());
let discr = base::to_immediate(&bcx, discr, switch_ty);
let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
for (value, target) in values.iter().zip(targets) {
......@@ -202,7 +204,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
LocalRef::Operand(None) => bug!("use of return before def"),
LocalRef::Lvalue(tr_lvalue) => {
OperandRef {
val: Ref(tr_lvalue.llval),
val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
ty: tr_lvalue.ty.to_ty(bcx.tcx())
}
}
......@@ -210,21 +212,22 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let llscratch = bcx.alloca(ret.original_ty, "ret");
self.store_operand(&bcx, llscratch, op, None);
self.store_operand(&bcx, llscratch, None, op);
llscratch
}
Ref(llval) => llval
Ref(llval, align) => {
assert_eq!(align, Alignment::AbiAligned, "return pointer is unaligned!");
llval
}
};
let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx, ret.ty);
unsafe {
llvm::LLVMSetAlignment(load, llalign);
}
let load = bcx.load(
bcx.pointercast(llslot, cast_ty.ptr_to()),
Some(llalign_of_min(bcx.ccx, ret.ty)));
load
} else {
let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
if let Ref(llval) = op.val {
base::load_ty(&bcx, llval, op.ty)
if let Ref(llval, align) = op.val {
base::load_ty(&bcx, llval, align, op.ty)
} else {
op.pack_if_pair(&bcx).immediate()
}
......@@ -425,7 +428,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
// The first argument is a thin destination pointer.
let llptr = self.trans_operand(&bcx, &args[0]).immediate();
let val = self.trans_operand(&bcx, &args[1]);
self.store_operand(&bcx, llptr, val, None);
self.store_operand(&bcx, llptr, None, val);
funclet_br(self, bcx, target);
return;
}
......@@ -550,7 +553,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
// Make a fake operand for store_return
let op = OperandRef {
val: Ref(dst),
val: Ref(dst, Alignment::AbiAligned),
ty: sig.output(),
};
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
......@@ -652,33 +655,39 @@ fn trans_argument(&mut self,
}
// Force by-ref if we have to load through a cast pointer.
let (mut llval, by_ref) = match op.val {
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
let llscratch = bcx.alloca(arg.original_ty, "arg");
self.store_operand(bcx, llscratch, op, None);
(llscratch, true)
self.store_operand(bcx, llscratch, None, op);
(llscratch, Alignment::AbiAligned, true)
} else {
(op.pack_if_pair(bcx).immediate(), false)
(op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false)
}
}
Ref(llval) => (llval, true)
Ref(llval, Alignment::Packed) if arg.is_indirect() => {
// `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
// think that ATM (Rust 1.16) we only pass temporaries here, but we
// shouldn't leave scary latent bugs around.
let llscratch = bcx.alloca(arg.original_ty, "arg");
base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1));
(llscratch, Alignment::AbiAligned, true)
}
Ref(llval, align) => (llval, align, true)
};
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.original_ty == Type::i1(bcx.ccx) {
// We store bools as i8 so we need to truncate to i1.
llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None);
llval = bcx.trunc(llval, arg.original_ty);
} else if let Some(ty) = arg.cast {
llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx, arg.ty);
unsafe {
llvm::LLVMSetAlignment(llval, llalign);
}
llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()),
align.min_with(llalign_of_min(bcx.ccx, arg.ty)));
} else {
llval = bcx.load(llval);
llval = bcx.load(llval, align.to_align());
}
}
......@@ -702,16 +711,16 @@ fn trans_arguments_untupled(&mut self,
// Handle both by-ref and immediate tuples.
match tuple.val {
Ref(llval) => {
Ref(llval, align) => {
for (n, &ty) in arg_types.iter().enumerate() {
let ptr = LvalueRef::new_sized_ty(llval, tuple.ty);
let ptr = ptr.trans_field_ptr(bcx, n);
let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align);
let (ptr, align) = ptr.trans_field_ptr(bcx, n);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty);
let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty);
Pair(lldata, llextra)
} else {
// trans_argument will load this if it needs to
Ref(ptr)
Ref(ptr, align)
};
let op = OperandRef {
val: val,
......@@ -839,15 +848,15 @@ fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>,
return if fn_ret_ty.is_indirect() {
// Odd, but possible, case, we have an operand temporary,
// but the calling convention has an indirect return.
let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
llargs.push(tmp);
ReturnDest::IndirectOperand(tmp, index)
let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
llargs.push(tmp.llval);
ReturnDest::IndirectOperand(tmp.llval, index)
} else if is_intrinsic {
// Currently, intrinsics always need a location to store
// the result. so we create a temporary alloca for the
// result
let tmp = bcx.alloca_ty(ret_ty, "tmp_ret");
ReturnDest::IndirectOperand(tmp, index)
let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
ReturnDest::IndirectOperand(tmp.llval, index)
} else {
ReturnDest::DirectOperand(index)
};
......@@ -892,7 +901,7 @@ fn trans_transmute(&mut self, bcx: &Builder<'a, 'tcx>,
let in_type = val.ty;
let out_type = dst.ty.to_ty(bcx.tcx());
let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type));
self.store_operand(bcx, cast_ptr, val, Some(llalign));
self.store_operand(bcx, cast_ptr, Some(llalign), val);
}
......@@ -908,15 +917,15 @@ fn store_return(&mut self,
Nothing => (),
Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
IndirectOperand(tmp, index) => {
let op = self.trans_load(bcx, tmp, op.ty);
let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty);
self.locals[index] = LocalRef::Operand(Some(op));
}
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
let op = if ret_ty.cast.is_some() {
let tmp = bcx.alloca_ty(op.ty, "tmp_ret");
ret_ty.store(bcx, op.immediate(), tmp);
self.trans_load(bcx, tmp, op.ty)
let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret");
ret_ty.store(bcx, op.immediate(), tmp.llval);
self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty)
} else {
op.unpack_if_pair(bcx)
};
......
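The new `Ref(llval, Alignment::Packed)` arm in `trans_argument` covers passing a packed field to an indirect (by-pointer) argument, where the callee is entitled to assume ABI alignment. A hypothetical reproducer (names invented for illustration):

```rust
#[repr(packed)]
struct P {
    tag: u8,
    big: [u64; 8], // at offset 1, and too large to pass in registers
}

fn take(big: [u64; 8]) -> u64 {
    big[0]
}

fn call_it(p: &P) -> u64 {
    // The misaligned field is first memcpy'd (align 1) into an
    // ABI-aligned scratch alloca, and the scratch is what gets passed.
    take(p.big)
}

fn main() {
    let p = P { tag: 0, big: [9; 8] };
    assert_eq!(call_it(&p), 9);
}
```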
......@@ -40,6 +40,7 @@
use std::fmt;
use std::ptr;
use super::lvalue::Alignment;
use super::operand::{OperandRef, OperandValue};
use super::MirContext;
......@@ -140,7 +141,7 @@ pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
// a constant LLVM global and cast its address if necessary.
let align = type_of::align_of(ccx, self.ty);
let ptr = consts::addr_of(ccx, self.llval, align, "const");
OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()))
OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned)
};
OperandRef {
......
......@@ -25,10 +25,52 @@
use glue;
use std::ptr;
use std::ops;
use super::{MirContext, LocalRef};
use super::operand::OperandValue;
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
Packed,
AbiAligned,
}
impl ops::BitOr for Alignment {
type Output = Self;
fn bitor(self, rhs: Self) -> Self {
match (self, rhs) {
(Alignment::Packed, _) => Alignment::Packed,
(Alignment::AbiAligned, a) => a,
}
}
}
impl Alignment {
pub fn from_packed(packed: bool) -> Self {
if packed {
Alignment::Packed
} else {
Alignment::AbiAligned
}
}
pub fn to_align(self) -> Option<u32> {
match self {
Alignment::Packed => Some(1),
Alignment::AbiAligned => None,
}
}
pub fn min_with(self, align: u32) -> Option<u32> {
match self {
Alignment::Packed => Some(1),
Alignment::AbiAligned => Some(align),
}
}
}
#[derive(Copy, Clone, Debug)]
pub struct LvalueRef<'tcx> {
/// Pointer to the contents of the lvalue
......@@ -39,25 +81,38 @@ pub struct LvalueRef<'tcx> {
/// Monomorphized type of this lvalue, including variant information
pub ty: LvalueTy<'tcx>,
/// Whether this lvalue is known to be aligned according to its layout
pub alignment: Alignment,
}
impl<'a, 'tcx> LvalueRef<'tcx> {
pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>,
alignment: Alignment) -> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment }
}
pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> {
LvalueRef::new_sized(llval, LvalueTy::from_ty(ty))
pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> {
LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment)
}
pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>) -> LvalueRef<'tcx> {
pub fn new_unsized_ty(llval: ValueRef, llextra: ValueRef, ty: Ty<'tcx>, alignment: Alignment)
-> LvalueRef<'tcx> {
LvalueRef {
llval: llval,
llextra: llextra,
ty: LvalueTy::from_ty(ty),
alignment: alignment,
}
}
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, ty);
let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
assert!(!ty.has_param_types());
Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
}
pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
let ty = self.ty.to_ty(ccx.tcx());
match ty.sty {
......@@ -81,10 +136,12 @@ fn struct_field_ptr(
fields: &Vec<Ty<'tcx>>,
ix: usize,
needs_cast: bool
) -> ValueRef {
) -> (ValueRef, Alignment) {
let fty = fields[ix];
let ccx = bcx.ccx;
let alignment = self.alignment | Alignment::from_packed(st.packed);
let ptr_val = if needs_cast {
let fields = st.field_index_by_increasing_offset().map(|i| {
type_of::in_memory_type_of(ccx, fields[i])
......@@ -101,14 +158,14 @@ fn struct_field_ptr(
// * Field is sized - pointer is properly aligned already
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
bcx.ccx.shared().type_is_sized(fty) {
return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
}
// If the type of the last field is [T] or str, then we don't need to do
// any adjustments
match fty.sty {
ty::TySlice(..) | ty::TyStr => {
return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
}
_ => ()
}
......@@ -117,7 +174,7 @@ fn struct_field_ptr(
if !self.has_extra() {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(ptr_val));
return bcx.struct_gep(ptr_val, ix);
return (bcx.struct_gep(ptr_val, ix), alignment);
}
// We need to get the pointer manually now.
......@@ -163,11 +220,11 @@ fn struct_field_ptr(
// Finally, cast back to the type expected
let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
bcx.pointercast(byte_ptr, ll_fty.ptr_to())
(bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment)
}
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef {
pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) {
let discr = match self.ty {
LvalueTy::Ty { .. } => 0,
LvalueTy::Downcast { variant_index, .. } => variant_index,
......@@ -186,17 +243,18 @@ pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef {
layout::Vector { count, .. } => {
assert_eq!(discr, 0);
assert!((ix as u64) < count);
bcx.struct_gep(self.llval, ix)
(bcx.struct_gep(self.llval, ix), self.alignment)
}
layout::General { discr: d, ref variants, .. } => {
let mut fields = adt::compute_fields(bcx.ccx, t, discr, false);
fields.insert(0, d.to_ty(&bcx.tcx(), false));
self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true)
}
layout::UntaggedUnion { .. } => {
layout::UntaggedUnion { ref variants } => {
let fields = adt::compute_fields(bcx.ccx, t, 0, false);
let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
bcx.pointercast(self.llval, ty.ptr_to())
(bcx.pointercast(self.llval, ty.ptr_to()),
self.alignment | Alignment::from_packed(variants.packed))
}
layout::RawNullablePointer { nndiscr, .. } |
layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => {
......@@ -205,19 +263,19 @@ pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef {
// (e.g., Result or Either with () as one side.)
let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
bcx.pointercast(self.llval, ty.ptr_to())
(bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed)
}
layout::RawNullablePointer { nndiscr, .. } => {
let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
assert_eq!(ix, 0);
assert_eq!(discr as u64, nndiscr);
let ty = type_of::type_of(bcx.ccx, nnty);
bcx.pointercast(self.llval, ty.ptr_to())
(bcx.pointercast(self.llval, ty.ptr_to()), self.alignment)
}
layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
assert_eq!(discr as u64, nndiscr);
self.struct_field_ptr(bcx, &nonnull,
&adt::compute_fields(bcx.ccx, t, discr, false), ix, false)
}
_ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
}
......@@ -250,7 +308,8 @@ pub fn trans_lvalue(&mut self,
mir::Lvalue::Static(def_id) => {
let const_ty = self.monomorphized_lvalue_ty(lvalue);
LvalueRef::new_sized(consts::get_static(ccx, def_id),
LvalueTy::from_ty(const_ty))
LvalueTy::from_ty(const_ty),
Alignment::AbiAligned)
},
mir::Lvalue::Projection(box mir::Projection {
ref base,
......@@ -264,18 +323,20 @@ pub fn trans_lvalue(&mut self,
let (llptr, llextra) = match ptr.val {
OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
OperandValue::Pair(llptr, llextra) => (llptr, llextra),
OperandValue::Ref(_) => bug!("Deref of by-Ref type {:?}", ptr.ty)
OperandValue::Ref(..) => bug!("Deref of by-Ref type {:?}", ptr.ty)
};
LvalueRef {
llval: llptr,
llextra: llextra,
ty: projected_ty,
alignment: Alignment::AbiAligned,
}
}
mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base);
let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
let projected_ty = self.monomorphize(&projected_ty);
let align = tr_base.alignment;
let project_index = |llindex| {
let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
......@@ -285,10 +346,10 @@ pub fn trans_lvalue(&mut self,
let zero = common::C_uint(bcx.ccx, 0u64);
bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
};
element
(element, align)
};
let (llprojected, llextra) = match projection.elem {
let ((llprojected, align), llextra) = match projection.elem {
mir::ProjectionElem::Deref => bug!(),
mir::ProjectionElem::Field(ref field, _) => {
let llextra = if self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)) {
......@@ -318,7 +379,7 @@ pub fn trans_lvalue(&mut self,
}
mir::ProjectionElem::Subslice { from, to } => {
let llindex = C_uint(bcx.ccx, from);
let llbase = project_index(llindex);
let (llbase, align) = project_index(llindex);
let base_ty = tr_base.ty.to_ty(bcx.tcx());
match base_ty.sty {
......@@ -328,25 +389,26 @@ pub fn trans_lvalue(&mut self,
let base_ty = self.monomorphized_lvalue_ty(lvalue);
let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to();
let llbase = bcx.pointercast(llbase, llbasety);
(llbase, ptr::null_mut())
((llbase, align), ptr::null_mut())
}
ty::TySlice(..) => {
assert!(tr_base.llextra != ptr::null_mut());
let lllen = bcx.sub(tr_base.llextra,
C_uint(bcx.ccx, from+to));
(llbase, lllen)
((llbase, align), lllen)
}
_ => bug!("unexpected type {:?} in Subslice", base_ty)
}
}
mir::ProjectionElem::Downcast(..) => {
(tr_base.llval, tr_base.llextra)
((tr_base.llval, align), tr_base.llextra)
}
};
LvalueRef {
llval: llprojected,
llextra: llextra,
ty: projected_ty,
alignment: align,
}
}
};
......@@ -357,6 +419,8 @@ pub fn trans_lvalue(&mut self,
// Perform an action using the given Lvalue.
// If the Lvalue is an empty LocalRef::Operand, then a temporary stack slot
// is created first, then used as an operand to update the Lvalue.
//
// FIXME: this is only called from transmute; please remove it.
pub fn with_lvalue_ref<F, U>(&mut self, bcx: &Builder<'a, 'tcx>,
lvalue: &mir::Lvalue<'tcx>, f: F) -> U
where F: FnOnce(&mut Self, LvalueRef<'tcx>) -> U
......@@ -367,10 +431,9 @@ pub fn with_lvalue_ref<F, U>(&mut self, bcx: &Builder<'a, 'tcx>,
LocalRef::Operand(None) => {
let lvalue_ty = self.monomorphized_lvalue_ty(lvalue);
assert!(!lvalue_ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(lvalue_ty, "lvalue_temp");
let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(lvalue_ty));
let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "lvalue_temp");
let ret = f(self, lvalue);
let op = self.trans_load(bcx, lvalue.llval, lvalue_ty);
let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty);
self.locals[index] = LocalRef::Operand(Some(op));
ret
}
......@@ -381,7 +444,8 @@ pub fn with_lvalue_ref<F, U>(&mut self, bcx: &Builder<'a, 'tcx>,
if common::type_is_zero_size(bcx.ccx, ty) {
// Pass an undef pointer as no stores can actually occur.
let llptr = C_undef(type_of(bcx.ccx, ty).ptr_to());
f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty)))
f(self, LvalueRef::new_sized(llptr, LvalueTy::from_ty(ty),
Alignment::AbiAligned))
} else {
bug!("Lvalue local already set");
}
......
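Since `Alignment` is the heart of this commit, here is a standalone copy of its lattice to make the combination rules concrete: `Packed` is absorbing under `|`, so any packed ancestor forces `align 1` onto every field projected out of it.

```rust
use std::ops;

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Alignment {
    Packed,
    AbiAligned,
}

impl ops::BitOr for Alignment {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self {
        match (self, rhs) {
            (Alignment::Packed, _) => Alignment::Packed,
            (Alignment::AbiAligned, a) => a,
        }
    }
}

impl Alignment {
    fn to_align(self) -> Option<u32> {
        match self {
            Alignment::Packed => Some(1),  // force align 1 metadata
            Alignment::AbiAligned => None, // omit metadata: ABI alignment
        }
    }
}

fn main() {
    // Projecting a field out of a packed parent taints it...
    assert_eq!(Alignment::AbiAligned | Alignment::Packed, Alignment::Packed);
    // ...and a packed parent cannot be un-tainted by an aligned field.
    assert_eq!(Alignment::Packed | Alignment::AbiAligned, Alignment::Packed);
    assert_eq!((Alignment::AbiAligned | Alignment::AbiAligned).to_align(), None);
    assert_eq!(Alignment::Packed.to_align(), Some(1));
}
```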
......@@ -38,7 +38,7 @@
pub use self::constant::trans_static_initializer;
use self::analyze::CleanupKind;
use self::lvalue::LvalueRef;
use self::lvalue::{Alignment, LvalueRef};
use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
......@@ -269,8 +269,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
debug!("alloc: {:?} ({}) -> lvalue", local, name);
assert!(!ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(ty, &name.as_str());
let lvalue = LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty));
let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
if dbg {
let (scope, span) = mircx.debug_loc(source_info);
declare_local(&bcx, &mircx.debug_context, name, ty, scope,
......@@ -283,12 +282,12 @@ pub fn trans_mir<'a, 'tcx: 'a>(
if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return pointer) -> lvalue", local);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty),
Alignment::AbiAligned))
} else if lvalue_locals.contains(local.index()) {
debug!("alloc: {:?} -> lvalue", local);
assert!(!ty.has_erasable_regions());
let lltemp = bcx.alloca_ty(ty, &format!("{:?}", local));
LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)))
LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local)))
} else {
// If this is an immediate local, we do not create an
// alloca in advance. Instead we wait until we see the
......@@ -388,9 +387,9 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
_ => bug!("spread argument isn't a tuple?!")
};
let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
let lvalue = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let dst = bcx.struct_gep(lltemp, i);
let dst = bcx.struct_gep(lvalue.llval, i);
let arg = &mircx.fn_ty.args[idx];
idx += 1;
if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) {
......@@ -409,7 +408,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// we can create one debuginfo entry for the argument.
arg_scope.map(|scope| {
let variable_access = VariableAccess::DirectVariable {
alloca: lltemp
alloca: lvalue.llval
};
declare_local(
bcx,
......@@ -422,7 +421,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
);
});
return LocalRef::Lvalue(LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty)));
return LocalRef::Lvalue(lvalue);
}
let arg = &mircx.fn_ty.args[idx];
......@@ -469,21 +468,21 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
};
return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
} else {
let lltemp = bcx.alloca_ty(arg_ty, &format!("arg{}", arg_index));
let lltemp = LvalueRef::alloca(bcx, arg_ty, &format!("arg{}", arg_index));
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
// so make an alloca to store them in.
let meta = &mircx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp));
meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp));
arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp.llval));
meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp.llval));
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
arg.store_fn_arg(bcx, &mut llarg_idx, lltemp);
arg.store_fn_arg(bcx, &mut llarg_idx, lltemp.llval);
}
lltemp
lltemp.llval
};
arg_scope.map(|scope| {
// Is this a regular argument?
......@@ -573,7 +572,8 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
);
}
});
LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)))
LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty),
Alignment::AbiAligned))
}).collect()
}
......
......@@ -10,6 +10,7 @@
use llvm::ValueRef;
use rustc::ty::Ty;
use rustc::ty::layout::Layout;
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
......@@ -23,6 +24,7 @@
use std::fmt;
use super::{MirContext, LocalRef};
use super::lvalue::Alignment;
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
......@@ -31,7 +33,7 @@
pub enum OperandValue {
/// A reference to the actual operand. The data is guaranteed
/// to be valid for the operand's lifetime.
Ref(ValueRef),
Ref(ValueRef, Alignment),
/// A single LLVM value.
Immediate(ValueRef),
/// A pair of immediate LLVM values. Used by fat pointers too.
......@@ -58,9 +60,9 @@ pub struct OperandRef<'tcx> {
impl<'tcx> fmt::Debug for OperandRef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.val {
OperandValue::Ref(r) => {
write!(f, "OperandRef(Ref({:?}) @ {:?})",
Value(r), self.ty)
OperandValue::Ref(r, align) => {
write!(f, "OperandRef(Ref({:?}, {:?}) @ {:?})",
Value(r), align, self.ty)
}
OperandValue::Immediate(i) => {
write!(f, "OperandRef(Immediate({:?}) @ {:?})",
......@@ -137,27 +139,33 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_load(&mut self,
bcx: &Builder<'a, 'tcx>,
llval: ValueRef,
align: Alignment,
ty: Ty<'tcx>)
-> OperandRef<'tcx>
{
debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, ty);
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty);
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx, ty) {
let f_align = match *bcx.ccx.layout_of(ty) {
Layout::Univariant { ref variant, .. } =>
Alignment::from_packed(variant.packed) | align,
_ => align
};
let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
let a_ptr = bcx.struct_gep(llval, 0);
let b_ptr = bcx.struct_gep(llval, 1);
OperandValue::Pair(
base::load_ty(bcx, a_ptr, a_ty),
base::load_ty(bcx, b_ptr, b_ty)
base::load_ty(bcx, a_ptr, f_align, a_ty),
base::load_ty(bcx, b_ptr, f_align, b_ty)
)
} else if common::type_is_immediate(bcx.ccx, ty) {
OperandValue::Immediate(base::load_ty(bcx, llval, ty))
OperandValue::Immediate(base::load_ty(bcx, llval, align, ty))
} else {
OperandValue::Ref(llval)
OperandValue::Ref(llval, align)
};
OperandRef { val: val, ty: ty }
......@@ -212,7 +220,7 @@ pub fn trans_consume(&mut self,
// out from their home
let tr_lvalue = self.trans_lvalue(bcx, lvalue);
let ty = tr_lvalue.ty.to_ty(bcx.tcx());
self.trans_load(bcx, tr_lvalue.llval, ty)
self.trans_load(bcx, tr_lvalue.llval, tr_lvalue.alignment, ty)
}
pub fn trans_operand(&mut self,
......@@ -230,9 +238,9 @@ pub fn trans_operand(&mut self,
mir::Operand::Constant(ref constant) => {
let val = self.trans_constant(bcx, constant);
let operand = val.to_operand(bcx.ccx);
if let OperandValue::Ref(ptr) = operand.val {
if let OperandValue::Ref(ptr, align) = operand.val {
// If this is a OperandValue::Ref to an immediate constant, load it.
self.trans_load(bcx, ptr, operand.ty)
self.trans_load(bcx, ptr, align, operand.ty)
} else {
operand
}
......@@ -243,8 +251,8 @@ pub fn trans_operand(&mut self,
pub fn store_operand(&mut self,
bcx: &Builder<'a, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>,
align: Option<u32>) {
align: Option<u32>,
operand: OperandRef<'tcx>) {
debug!("store_operand: operand={:?}, align={:?}", operand, align);
// Avoid generating stores of zero-sized values, because the only way to have a zero-sized
// value is through `undef`, and store itself is useless.
......@@ -252,7 +260,10 @@ pub fn store_operand(&mut self,
return;
}
match operand.val {
OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty, align),
OperandValue::Ref(r, Alignment::Packed) =>
base::memcpy_ty(bcx, lldest, r, operand.ty, Some(1)),
OperandValue::Ref(r, Alignment::AbiAligned) =>
base::memcpy_ty(bcx, lldest, r, operand.ty, align),
OperandValue::Immediate(s) => {
bcx.store(base::from_immediate(bcx, s), lldest, align);
}
......
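The `OperandValue::Ref(ValueRef, Alignment)` split shows up most clearly in `store_operand`: a by-ref copy out of a packed place must memcpy with source alignment 1. A sketch with a deliberately non-immediate field type (hypothetical names):

```rust
#[repr(packed)]
struct P {
    first: u8,
    second: [u32; 4], // not an immediate, so it moves as OperandValue::Ref
}

fn copy_out(p: &P) -> [u32; 4] {
    // This copy is the memcpy_ty(..., Some(1)) case: the source is
    // Ref(_, Alignment::Packed), the destination an aligned local.
    p.second
}

fn main() {
    let p = P { first: 0, second: [7; 4] };
    assert_eq!(copy_out(&p), [7; 4]);
}
```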
......@@ -33,7 +33,7 @@
use super::MirContext;
use super::constant::const_scalar_checked_binop;
use super::operand::{OperandRef, OperandValue};
use super::lvalue::{LvalueRef};
use super::lvalue::LvalueRef;
impl<'a, 'tcx> MirContext<'a, 'tcx> {
pub fn trans_rvalue(&mut self,
......@@ -50,7 +50,7 @@ pub fn trans_rvalue(&mut self,
let tr_operand = self.trans_operand(&bcx, operand);
// FIXME: consider not copying constants through stack. (fixable by translating
// constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
self.store_operand(&bcx, dest.llval, tr_operand, None);
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
bcx
}
......@@ -61,7 +61,7 @@ pub fn trans_rvalue(&mut self,
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp, None);
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
return bcx;
}
......@@ -81,13 +81,15 @@ pub fn trans_rvalue(&mut self,
// index into the struct, and this case isn't
// important enough for it.
debug!("trans_rvalue: creating ugly alloca");
let lltemp = bcx.alloca_ty(operand.ty, "__unsize_temp");
base::store_ty(&bcx, llval, lltemp, operand.ty);
lltemp
let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
scratch
}
OperandValue::Ref(llref, align) => {
LvalueRef::new_sized_ty(llref, operand.ty, align)
}
OperandValue::Ref(llref) => llref
};
base::coerce_unsized_into(&bcx, llref, operand.ty, dest.llval, cast_ty);
base::coerce_unsized_into(&bcx, &llref, &dest);
bcx
}
......@@ -97,7 +99,7 @@ pub fn trans_rvalue(&mut self,
let size = C_uint(bcx.ccx, size);
let base = base::get_dataptr(&bcx, dest.llval);
tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
self.store_operand(bcx, llslot, tr_elem, None);
self.store_operand(bcx, llslot, dest.alignment.to_align(), tr_elem);
})
}
......@@ -111,15 +113,15 @@ pub fn trans_rvalue(&mut self,
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx, op.ty) {
let mut val = LvalueRef::new_sized(dest.llval, dest.ty);
let mut val = LvalueRef::new_sized(dest.llval, dest.ty, dest.alignment);
let field_index = active_field_index.unwrap_or(i);
val.ty = LvalueTy::Downcast {
adt_def: adt_def,
substs: self.monomorphize(&substs),
variant_index: disr.0 as usize,
};
let lldest_i = val.trans_field_ptr(&bcx, field_index);
self.store_operand(&bcx, lldest_i, op, None);
let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
self.store_operand(&bcx, lldest_i, align.to_align(), op);
}
}
},
......@@ -131,6 +133,7 @@ pub fn trans_rvalue(&mut self,
} else {
None
};
let alignment = dest.alignment;
for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields.
......@@ -144,7 +147,7 @@ pub fn trans_rvalue(&mut self,
i
};
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, op, None);
self.store_operand(&bcx, dest, alignment.to_align(), op);
}
}
}
......@@ -169,7 +172,7 @@ pub fn trans_rvalue(&mut self,
_ => {
assert!(rvalue_creates_operand(rvalue));
let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
self.store_operand(&bcx, dest.llval, temp, None);
self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
bcx
}
}
......@@ -228,7 +231,7 @@ pub fn trans_rvalue_operand(&mut self,
operand.ty, cast_ty);
OperandValue::Pair(lldata, llextra)
}
OperandValue::Ref(_) => {
OperandValue::Ref(..) => {
bug!("by-ref operand {:?} in trans_rvalue_operand",
operand);
}
......
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C no-prepopulate-passes
#![crate_type = "lib"]
#[repr(packed)]
pub struct Packed {
dealign: u8,
data: u32
}
// CHECK-LABEL: @write_pkd
#[no_mangle]
pub fn write_pkd(pkd: &mut Packed) -> u32 {
// CHECK: %{{.*}} = load i32, i32* %{{.*}}, align 1
// CHECK: store i32 42, i32* %{{.*}}, align 1
let result = pkd.data;
pkd.data = 42;
result
}
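To inspect the effect by hand, one can compile a crate like this test with something along the lines of `rustc --emit=llvm-ir -C no-prepopulate-passes` and check that the loads and stores of `data` carry `align 1` rather than the ABI default of `align 4`.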