Commit d4ca1cff authored by Björn Steinbrink

Fix volatile / atomic ops on bools and small aggregates

Boolean values and small aggregates have a different type in
args/allocas than they do as SSA values, but the intrinsics for
volatile and atomic ops were missing the necessary casts to handle that.

Fixes #23550
Parent: ecdf792d
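The mismatch the message describes is easiest to see for bool: in an arg or alloca it occupies a full byte (i8), while the corresponding SSA value is a 1-bit i1, so every store has to widen and every load has to narrow. A minimal stand-alone sketch of that round trip, in plain Rust with a hypothetical byte-backed slot (an illustration of the idea, not compiler code):

    // `slot` plays the role of the i8-backed alloca; to_arg_repr and
    // from_arg_repr mirror what to_arg_ty (ZExt) and from_arg_ty (Trunc)
    // do in the patch below.
    fn to_arg_repr(v: bool) -> u8 {      // store path: widen i1 -> i8
        v as u8
    }

    fn from_arg_repr(raw: u8) -> bool {  // load path: narrow i8 -> i1
        raw & 1 != 0
    }

    fn main() {
        let mut slot: u8 = to_arg_repr(false);
        assert!(!from_arg_repr(slot));
        slot = to_arg_repr(true);
        assert!(from_arg_repr(slot));
    }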
@@ -983,56 +983,72 @@ pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
 /// gives us better information about what we are loading.
 pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                            ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
-    if type_is_zero_size(cx.ccx(), t) {
-        C_undef(type_of::type_of(cx.ccx(), t))
-    } else if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
-        // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
-        // for this leads to bad optimizations, so its arg type is an appropriately sized integer
-        // and we have to convert it
-        Load(cx, BitCast(cx, ptr, type_of::arg_type_of(cx.ccx(), t).ptr_to()))
-    } else {
-        unsafe {
-            let global = llvm::LLVMIsAGlobalVariable(ptr);
-            if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
-                let val = llvm::LLVMGetInitializer(global);
-                if !val.is_null() {
-                    // This could go into its own function, for DRY.
-                    // (something like "pre-store packing/post-load unpacking")
-                    if ty::type_is_bool(t) {
-                        return Trunc(cx, val, Type::i1(cx.ccx()));
-                    } else {
-                        return val;
-                    }
-                }
+    if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
+        return C_undef(type_of::type_of(cx.ccx(), t));
+    }
+
+    let ptr = to_arg_ty_ptr(cx, ptr, t);
+
+    if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
+        return Load(cx, ptr);
+    }
+
+    unsafe {
+        let global = llvm::LLVMIsAGlobalVariable(ptr);
+        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
+            let val = llvm::LLVMGetInitializer(global);
+            if !val.is_null() {
+                return from_arg_ty(cx, val, t);
            }
         }
-
-        if ty::type_is_bool(t) {
-            Trunc(cx, LoadRangeAssert(cx, ptr, 0, 2, llvm::False), Type::i1(cx.ccx()))
-        } else if ty::type_is_char(t) {
-            // a char is a Unicode codepoint, and so takes values from 0
-            // to 0x10FFFF inclusive only.
-            LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
-        } else if (ty::type_is_region_ptr(t) || ty::type_is_unique(t))
-            && !common::type_is_fat_ptr(cx.tcx(), t) {
-            LoadNonNull(cx, ptr)
-        } else {
-            Load(cx, ptr)
-        }
-    }
+    }
+
+    let val = if ty::type_is_bool(t) {
+        LoadRangeAssert(cx, ptr, 0, 2, llvm::False)
+    } else if ty::type_is_char(t) {
+        // a char is a Unicode codepoint, and so takes values from 0
+        // to 0x10FFFF inclusive only.
+        LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
+    } else if (ty::type_is_region_ptr(t) || ty::type_is_unique(t))
+        && !common::type_is_fat_ptr(cx.tcx(), t) {
+        LoadNonNull(cx, ptr)
+    } else {
+        Load(cx, ptr)
+    };
+
+    from_arg_ty(cx, val, t)
 }

 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
 /// differs from the type used for SSA values.
 pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
-    if ty::type_is_bool(t) {
-        Store(cx, ZExt(cx, v, Type::i8(cx.ccx())), dst);
-    } else if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
+    Store(cx, to_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
+}
+
+pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
+    if ty::type_is_bool(ty) {
+        ZExt(bcx, val, Type::i8(bcx.ccx()))
+    } else {
+        val
+    }
+}
+
+pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
+    if ty::type_is_bool(ty) {
+        Trunc(bcx, val, Type::i1(bcx.ccx()))
+    } else {
+        val
+    }
+}
+
+pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef {
+    if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() {
         // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
         // for this leads to bad optimizations, so its arg type is an appropriately sized integer
         // and we have to convert it
-        Store(cx, v, BitCast(cx, dst, type_of::arg_type_of(cx.ccx(), t).ptr_to()));
+        BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to())
     } else {
-        Store(cx, v, dst);
+        ptr
     }
 }
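The new to_arg_ty_ptr helper handles the aggregate half of the problem: a small aggregate that is passed as an immediate is carried around as an appropriately sized integer, so its pointer is reinterpreted before loading or storing through it. A rough stand-alone analogue of that idea, using a hypothetical #[repr(transparent)] wrapper so the pointer cast is well-defined (the compiler does the equivalent with an LLVM bitcast):

    // Not compiler code: read a one-field wrapper through its integer
    // representation, the way to_arg_ty_ptr bitcasts the pointer to the
    // arg type's pointer before the Load.
    #[repr(transparent)]            // guarantees Wrap and i64 share one layout
    struct Wrap(i64);

    unsafe fn load_as_int(p: *const Wrap) -> i64 {
        *(p as *const i64)          // BitCast(ptr, arg_type.ptr_to()) + Load
    }

    fn main() {
        let w = Wrap(42);
        assert_eq!(unsafe { load_as_int(&w) }, w.0);
    }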
@@ -446,10 +446,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                            call_debug_location)
         }
         (_, "volatile_load") => {
-            VolatileLoad(bcx, llargs[0])
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
+            from_arg_ty(bcx, VolatileLoad(bcx, ptr), tp_ty)
         },
         (_, "volatile_store") => {
-            VolatileStore(bcx, llargs[1], llargs[0]);
+            let tp_ty = *substs.types.get(FnSpace, 0);
+            let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
+            let val = to_arg_ty(bcx, llargs[1], tp_ty);
+            VolatileStore(bcx, val, ptr);
             C_nil(ccx)
         },
@@ -709,8 +714,11 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                             llvm::SequentiallyConsistent
                     };
-                    let res = AtomicCmpXchg(bcx, llargs[0], llargs[1],
-                                            llargs[2], order,
+                    let tp_ty = *substs.types.get(FnSpace, 0);
+                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
+                    let cmp = to_arg_ty(bcx, llargs[1], tp_ty);
+                    let src = to_arg_ty(bcx, llargs[2], tp_ty);
+                    let res = AtomicCmpXchg(bcx, ptr, cmp, src, order,
                                             strongest_failure_ordering);
                     if unsafe { llvm::LLVMVersionMinor() >= 5 } {
                         ExtractValue(bcx, res, 0)
@@ -720,10 +728,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                 }
                 "load" => {
-                    AtomicLoad(bcx, llargs[0], order)
+                    let tp_ty = *substs.types.get(FnSpace, 0);
+                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
+                    from_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
                 }
                 "store" => {
-                    AtomicStore(bcx, llargs[1], llargs[0], order);
+                    let tp_ty = *substs.types.get(FnSpace, 0);
+                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
+                    let val = to_arg_ty(bcx, llargs[1], tp_ty);
+                    AtomicStore(bcx, val, ptr, order);
                     C_nil(ccx)
                 }
@@ -749,7 +762,10 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
                         _ => ccx.sess().fatal("unknown atomic operation")
                     };
-                    AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
+                    let tp_ty = *substs.types.get(FnSpace, 0);
+                    let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty);
+                    let val = to_arg_ty(bcx, llargs[1], tp_ty);
+                    AtomicRMW(bcx, atom_op, ptr, val, order)
                 }
             }
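Every atomic arm above follows the same shape: the pointer goes through to_arg_ty_ptr, value operands through to_arg_ty, and any loaded result back through from_arg_ty. As a stand-alone illustration of why that widening matters for cxchg, here is a hedged sketch of a compare-and-swap on a bool expressed with today's stable AtomicU8 API rather than the raw intrinsics:

    // Stable-API stand-in, not the intrinsic lowering itself: the bool
    // operands are widened to their byte representation before the CAS,
    // and the returned previous value is narrowed back to a bool.
    use std::sync::atomic::{AtomicU8, Ordering};

    fn bool_cxchg(slot: &AtomicU8, current: bool, new: bool) -> bool {
        let prev = slot
            .compare_exchange(current as u8, new as u8, Ordering::SeqCst, Ordering::SeqCst)
            .unwrap_or_else(|v| v);
        prev != 0
    }

    fn main() {
        let slot = AtomicU8::new(false as u8);
        assert_eq!(bool_cxchg(&slot, false, true), false); // previous value was false
        assert_eq!(slot.load(Ordering::SeqCst), 1);        // slot now holds true (1)
    }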
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

#![feature(core)]
#![allow(warnings)]

use std::intrinsics;

#[derive(Copy)]
struct Wrap(i64);

// These volatile and atomic intrinsics used to cause an ICE
unsafe fn test_bool(p: &mut bool, v: bool) {
    intrinsics::volatile_load(p);
    intrinsics::volatile_store(p, v);
    intrinsics::atomic_load(p);
    intrinsics::atomic_cxchg(p, v, v);
    intrinsics::atomic_store(p, v);
    intrinsics::atomic_xchg(p, v);
}

unsafe fn test_immediate_fca(p: &mut Wrap, v: Wrap) {
    intrinsics::volatile_load(p);
    intrinsics::volatile_store(p, v);
    intrinsics::atomic_load(p);
    intrinsics::atomic_cxchg(p, v, v);
    intrinsics::atomic_store(p, v);
    intrinsics::atomic_xchg(p, v);
}

fn main() {}
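The test drives the unstable intrinsics directly. The volatile half of the same lowering is also reachable from stable code, since std::ptr's volatile helpers are thin wrappers over these intrinsics, so a rough stable-API counterpart of the bool case looks like:

    // Stable counterpart of the volatile part of the test above.
    fn main() {
        let mut flag = false;
        unsafe {
            std::ptr::write_volatile(&mut flag, true);
            assert!(std::ptr::read_volatile(&flag));
        }
    }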