Commit 71a11a0b authored by Björn Steinbrink

Fix transmute::<T, U> where T requires a bigger alignment than U

For transmute::<T, U> we simply pointercast the destination from a U
pointer to a T pointer, without providing any alignment information.
LLVM therefore assumes that the destination is aligned to hold a value
of type T, which is not necessarily true. This can lead to LLVM emitting
machine instructions that rely on that alignment, and thus cause aborts.

To fix this, we need to provide the actual alignment to store_operand()
and in turn to store() so they can set the proper alignment information
on the stores and LLVM can emit the proper machine instructions.

Fixes #32947
Parent 82801b55
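To see the failure mode in ordinary Rust, consider the sketch below. It is an illustration only, not part of the commit: Aligned32 is a hypothetical stand-in (using the modern repr(align) attribute) for the #[repr(simd)] type used by the regression test at the end of this commit. The point is simply that the destination type of such a transmute can be less aligned than the source type, so the emitted store may not assume the source's alignment.

    use std::mem;

    // Hypothetical stand-in for the 32-byte-aligned SIMD type in the test below.
    #[repr(C, align(32))]
    struct Aligned32([u64; 4]);

    fn main() {
        let value = Aligned32([1, 1, 1, 1]);
        // [u8; 32] has alignment 1, so `bytes` may live at any address; the
        // store emitted for this transmute must therefore use the smaller of
        // the two alignments (here: 1), not Aligned32's 32-byte alignment.
        let bytes: [u8; 32] = unsafe { mem::transmute(value) };
        assert!(mem::align_of::<Aligned32>() > mem::align_of::<[u8; 32]>());
        assert_eq!(bytes.len(), 32);
    }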
@@ -248,11 +248,8 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
                 let cast_dst = bcx.pointercast(dst, ty.ptr_to());
-                let store = bcx.store(val, cast_dst);
                 let llalign = llalign_of_min(ccx, self.ty);
-                unsafe {
-                    llvm::LLVMSetAlignment(store, llalign);
-                }
+                bcx.store(val, cast_dst, Some(llalign));
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type. The
@@ -273,7 +270,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
                 base::Lifetime::Start.call(bcx, llscratch);
                 // ...where we first store the value...
-                bcx.store(val, llscratch);
+                bcx.store(val, llscratch, None);
                 // ...and then memcpy it to the intended destination.
                 base::call_memcpy(bcx,
@@ -289,7 +286,7 @@ pub fn store(&self, bcx: &BlockAndBuilder, mut val: ValueRef, dst: ValueRef) {
             if self.original_ty == Type::i1(ccx) {
                 val = bcx.zext(val, Type::i8(ccx));
             }
-            bcx.store(val, dst);
+            bcx.store(val, dst, None);
         }
     }
@@ -443,11 +443,11 @@ pub fn trans_set_discr<'a, 'tcx>(
         layout::CEnum{ discr, min, max, .. } => {
             assert_discr_in_range(Disr(min), Disr(max), to);
             bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
-                      val);
+                      val, None);
         }
         layout::General{ discr, .. } => {
             bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to.0, true),
-                      bcx.struct_gep(val, 0));
+                      bcx.struct_gep(val, 0), None);
         }
         layout::Univariant { .. }
         | layout::UntaggedUnion { .. }
@@ -458,7 +458,7 @@ pub fn trans_set_discr<'a, 'tcx>(
             let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
             if to.0 != nndiscr {
                 let llptrty = type_of::sizing_type_of(bcx.ccx, nnty);
-                bcx.store(C_null(llptrty), val);
+                bcx.store(C_null(llptrty), val, None);
             }
         }
         layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
@@ -476,7 +476,7 @@ pub fn trans_set_discr<'a, 'tcx>(
                 let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
                 let llptrptr = bcx.gepi(val, &path[..]);
                 let llptrty = val_ty(llptrptr).element_type();
-                bcx.store(C_null(llptrty), llptrptr);
+                bcx.store(C_null(llptrty), llptrptr, None);
             }
         }
     }
@@ -105,7 +105,7 @@ pub fn trans_inline_asm<'a, 'tcx>(
     let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
     for (i, (_, &(val, _))) in outputs.enumerate() {
         let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
-        bcx.store(v, val);
+        bcx.store(v, val, None);
     }

     // Store expn_id in a metadata node so we can map LLVM errors
@@ -290,7 +290,7 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
                 let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i);
                 let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i);
                 if src_fty == dst_fty {
-                    memcpy_ty(bcx, dst_f, src_f, src_fty);
+                    memcpy_ty(bcx, dst_f, src_f, src_fty, None);
                 } else {
                     coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
                 }
@@ -429,7 +429,7 @@ pub fn store_ty<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>, v: ValueRef, dst: ValueRef,
         let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
         store_fat_ptr(cx, lladdr, llextra, dst, t);
     } else {
-        cx.store(from_immediate(cx, v), dst);
+        cx.store(from_immediate(cx, v), dst, None);
     }
 }
@@ -439,8 +439,8 @@ pub fn store_fat_ptr<'a, 'tcx>(cx: &BlockAndBuilder<'a, 'tcx>,
                                dst: ValueRef,
                                _ty: Ty<'tcx>) {
     // FIXME: emit metadata
-    cx.store(data, get_dataptr(cx, dst));
-    cx.store(extra, get_meta(cx, dst));
+    cx.store(data, get_dataptr(cx, dst), None);
+    cx.store(extra, get_meta(cx, dst), None);
 }

 pub fn load_fat_ptr<'a, 'tcx>(
@@ -523,26 +523,21 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>,
     b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }

-pub fn memcpy_ty<'a, 'tcx>(
-    bcx: &BlockAndBuilder<'a, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>
-) {
+pub fn memcpy_ty<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
+                           dst: ValueRef,
+                           src: ValueRef,
+                           t: Ty<'tcx>,
+                           align: Option<u32>) {
     let ccx = bcx.ccx;

     if type_is_zero_size(ccx, t) {
         return;
     }

-    if t.is_structural() {
-        let llty = type_of::type_of(ccx, t);
-        let llsz = llsize_of(ccx, llty);
-        let llalign = type_of::align_of(ccx, t);
-        call_memcpy(bcx, dst, src, llsz, llalign as u32);
-    } else if common::type_is_fat_ptr(bcx.ccx, t) {
-        let (data, extra) = load_fat_ptr(bcx, src, t);
-        store_fat_ptr(bcx, data, extra, dst, t);
-    } else {
-        store_ty(bcx, load_ty(bcx, src, t), dst, t);
-    }
+    let llty = type_of::type_of(ccx, t);
+    let llsz = llsize_of(ccx, llty);
+    let llalign = align.unwrap_or_else(|| type_of::align_of(ccx, t));
+    call_memcpy(bcx, dst, src, llsz, llalign as u32);
 }

 pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
@@ -512,13 +512,17 @@ pub fn load_nonnull(&self, ptr: ValueRef) -> ValueRef {
         value
     }

-    pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
+    pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<u32>) -> ValueRef {
         debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
         assert!(!self.llbuilder.is_null());
         self.count_insn("store");
         let ptr = self.check_store(val, ptr);
         unsafe {
-            llvm::LLVMBuildStore(self.llbuilder, val, ptr)
+            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
+            if let Some(align) = align {
+                llvm::LLVMSetAlignment(store, align as c_uint);
+            }
+            store
         }
     }
@@ -288,8 +288,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
             let val = bcx.call(llfn, &[llargs[0], llargs[1]], None);
             let result = bcx.extract_value(val, 0);
             let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx));
-            bcx.store(result, bcx.struct_gep(llresult, 0));
-            bcx.store(overflow, bcx.struct_gep(llresult, 1));
+            bcx.store(result, bcx.struct_gep(llresult, 0), None);
+            bcx.store(overflow, bcx.struct_gep(llresult, 1), None);

             C_nil(bcx.ccx)
         },
@@ -407,8 +407,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
                                         failorder, weak);
                 let result = bcx.extract_value(val, 0);
                 let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx));
-                bcx.store(result, bcx.struct_gep(llresult, 0));
-                bcx.store(success, bcx.struct_gep(llresult, 1));
+                bcx.store(result, bcx.struct_gep(llresult, 0), None);
+                bcx.store(success, bcx.struct_gep(llresult, 1), None);
             } else {
                 invalid_monomorphization(sty);
             }
@@ -613,7 +613,7 @@ fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
             for i in 0..elems.len() {
                 let val = bcx.extract_value(val, i);
-                bcx.store(val, bcx.struct_gep(llresult, i));
+                bcx.store(val, bcx.struct_gep(llresult, i), None);
             }
             C_nil(ccx)
         }
@@ -625,10 +625,7 @@ fn modify_as_needed<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
     if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
         if let Some(ty) = fn_ty.ret.cast {
             let ptr = bcx.pointercast(llresult, ty.ptr_to());
-            let store = bcx.store(llval, ptr);
-            unsafe {
-                llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
-            }
+            bcx.store(llval, ptr, Some(type_of::align_of(ccx, ret_ty)));
         } else {
             store_ty(bcx, llval, llresult, ret_ty);
         }
@@ -695,7 +692,7 @@ fn try_intrinsic<'a, 'tcx>(
 ) {
     if bcx.sess().no_landing_pads() {
         bcx.call(func, &[data], None);
-        bcx.store(C_null(Type::i8p(&bcx.ccx)), dest);
+        bcx.store(C_null(Type::i8p(&bcx.ccx)), dest, None);
     } else if wants_msvc_seh(bcx.sess()) {
         trans_msvc_try(bcx, func, data, local_ptr, dest);
     } else {
@@ -789,8 +786,8 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
         let val1 = C_i32(ccx, 1);
         let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]));
         let local_ptr = catchpad.bitcast(local_ptr, i64p);
-        catchpad.store(arg1, local_ptr);
-        catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]));
+        catchpad.store(arg1, local_ptr, None);
+        catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), None);
         catchpad.catch_ret(tok, caught.llbb());

         caught.ret(C_i32(ccx, 1));
@@ -799,7 +796,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bcx.call(llfn, &[func, data, local_ptr], None);
-    bcx.store(ret, dest);
+    bcx.store(ret, dest, None);
 }

 // Definition of the standard "try" function for Rust using the GNU-like model
@@ -858,14 +855,14 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
         let vals = catch.landing_pad(lpad_ty, bcx.ccx.eh_personality(), 1, catch.fcx().llfn);
         catch.add_clause(vals, C_null(Type::i8p(ccx)));
         let ptr = catch.extract_value(vals, 0);
-        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()));
+        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(ccx).ptr_to()), None);
         catch.ret(C_i32(ccx, 1));
     });

     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
     let ret = bcx.call(llfn, &[func, data, local_ptr], None);
-    bcx.store(ret, dest);
+    bcx.store(ret, dest, None);
 }

 // Helper function to give a Block to a closure to translate a shim function.
@@ -23,7 +23,7 @@
 use Disr;
 use machine::{llalign_of_min, llbitsize_of_real};
 use meth;
-use type_of;
+use type_of::{self, align_of};
 use glue;
 use type_::Type;
@@ -31,6 +31,8 @@
 use rustc_data_structures::fx::FxHashMap;
 use syntax::symbol::Symbol;
+use std::cmp;
+
 use super::{MirContext, LocalRef};
 use super::analyze::CleanupKind;
 use super::constant::Const;
@@ -207,7 +209,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
                 let llslot = match op.val {
                     Immediate(_) | Pair(..) => {
                         let llscratch = bcx.fcx().alloca(ret.original_ty, "ret");
-                        self.store_operand(&bcx, llscratch, op);
+                        self.store_operand(&bcx, llscratch, op, None);
                         llscratch
                     }
                     Ref(llval) => llval
@@ -426,7 +428,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
                     // The first argument is a thin destination pointer.
                     let llptr = self.trans_operand(&bcx, &args[0]).immediate();
                     let val = self.trans_operand(&bcx, &args[1]);
-                    self.store_operand(&bcx, llptr, val);
+                    self.store_operand(&bcx, llptr, val, None);
                     funclet_br(self, bcx, target);
                     return;
                 }
@@ -659,7 +661,7 @@ fn trans_argument(&mut self,
             Immediate(_) | Pair(..) => {
                 if arg.is_indirect() || arg.cast.is_some() {
                     let llscratch = bcx.fcx().alloca(arg.original_ty, "arg");
-                    self.store_operand(bcx, llscratch, op);
+                    self.store_operand(bcx, llscratch, op, None);
                     (llscratch, true)
                 } else {
                     (op.pack_if_pair(bcx).immediate(), false)
@@ -801,7 +803,7 @@ fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> BasicBlockRef {
         let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
         bcx.set_cleanup(llretval);
         let slot = self.get_personality_slot(&bcx);
-        bcx.store(llretval, slot);
+        bcx.store(llretval, slot, None);
         bcx.br(target.llbb());
         bcx.llbb()
     }
@@ -886,7 +888,10 @@ fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'a, 'tcx>,
         let llty = type_of::type_of(bcx.ccx, val.ty);
         let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
-        self.store_operand(bcx, cast_ptr, val);
+        let in_type = val.ty;
+        let out_type = dst.ty.to_ty(bcx.tcx());
+        let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type));
+        self.store_operand(bcx, cast_ptr, val, Some(llalign));
     }
@@ -514,7 +514,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &BlockAndBuilder<'a, 'tcx>,
                 // environment into its components so it ends up out of bounds.
                 let env_ptr = if !env_ref {
                     let alloc = bcx.fcx().alloca(common::val_ty(llval), "__debuginfo_env_ptr");
-                    bcx.store(llval, alloc);
+                    bcx.store(llval, alloc, None);
                     alloc
                 } else {
                     llval
@@ -244,21 +244,24 @@ pub fn trans_operand(&mut self,
     pub fn store_operand(&mut self,
                          bcx: &BlockAndBuilder<'a, 'tcx>,
                          lldest: ValueRef,
-                         operand: OperandRef<'tcx>) {
-        debug!("store_operand: operand={:?}", operand);
+                         operand: OperandRef<'tcx>,
+                         align: Option<u32>) {
+        debug!("store_operand: operand={:?}, align={:?}", operand, align);
         // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
         // value is through `undef`, and store itself is useless.
         if common::type_is_zero_size(bcx.ccx, operand.ty) {
             return;
         }
         match operand.val {
-            OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty),
-            OperandValue::Immediate(s) => base::store_ty(bcx, s, lldest, operand.ty),
+            OperandValue::Ref(r) => base::memcpy_ty(bcx, lldest, r, operand.ty, align),
+            OperandValue::Immediate(s) => {
+                bcx.store(base::from_immediate(bcx, s), lldest, align);
+            }
             OperandValue::Pair(a, b) => {
                 let a = base::from_immediate(bcx, a);
                 let b = base::from_immediate(bcx, b);
-                bcx.store(a, bcx.struct_gep(lldest, 0));
-                bcx.store(b, bcx.struct_gep(lldest, 1));
+                bcx.store(a, bcx.struct_gep(lldest, 0), align);
+                bcx.store(b, bcx.struct_gep(lldest, 1), align);
             }
         }
     }
@@ -48,7 +48,7 @@ pub fn trans_rvalue(&mut self,
                 let tr_operand = self.trans_operand(&bcx, operand);
                 // FIXME: consider not copying constants through stack. (fixable by translating
                 // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
-                self.store_operand(&bcx, dest.llval, tr_operand);
+                self.store_operand(&bcx, dest.llval, tr_operand, None);
                 bcx
             }
@@ -59,7 +59,7 @@ pub fn trans_rvalue(&mut self,
                     // into-coerce of a thin pointer to a fat pointer - just
                     // use the operand path.
                     let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                    self.store_operand(&bcx, dest.llval, temp);
+                    self.store_operand(&bcx, dest.llval, temp, None);
                     return bcx;
                 }
@@ -95,7 +95,7 @@ pub fn trans_rvalue(&mut self,
                 let size = C_uint(bcx.ccx, size);
                 let base = base::get_dataptr(&bcx, dest.llval);
                 tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot| {
-                    self.store_operand(bcx, llslot, tr_elem);
+                    self.store_operand(bcx, llslot, tr_elem, None);
                 })
             }
@@ -113,7 +113,7 @@ pub fn trans_rvalue(&mut self,
                             let field_index = active_field_index.unwrap_or(i);
                             let lldest_i = adt::trans_field_ptr(&bcx, dest_ty, val, disr,
                                                                 field_index);
-                            self.store_operand(&bcx, lldest_i, op);
+                            self.store_operand(&bcx, lldest_i, op, None);
                         }
                     }
                 },
@@ -138,7 +138,7 @@ pub fn trans_rvalue(&mut self,
                             i
                         };
                         let dest = bcx.gepi(dest.llval, &[0, i]);
-                        self.store_operand(&bcx, dest, op);
+                        self.store_operand(&bcx, dest, op, None);
                     }
                 }
             }
@@ -163,7 +163,7 @@ pub fn trans_rvalue(&mut self,
             _ => {
                 assert!(rvalue_creates_operand(rvalue));
                 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                self.store_operand(&bcx, dest.llval, temp);
+                self.store_operand(&bcx, dest.llval, temp, None);
                 bcx
             }
         }
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(repr_simd, test)]

extern crate test;

#[repr(simd)]
pub struct Mu64(pub u64, pub u64, pub u64, pub u64);

fn main() {
    // This ensures an unaligned pointer even in optimized builds. LLVM gets
    // enough type information to handle that case correctly when optimizing,
    // but at the time of writing this is enough to trigger the bug in
    // non-optimized builds.
    unsafe {
        let memory = &mut [0u64; 8] as *mut _ as *mut u8;
        let misaligned_ptr: &mut [u8; 32] = {
            std::mem::transmute(memory.offset(1))
        };
        *misaligned_ptr = std::mem::transmute(Mu64(1, 1, 1, 1));
        test::black_box(memory);
    }
}