Commit cbe31a42 authored by Denis Merigoux, committed by Eduard-Mihai Burtescu

Generalized base::coerce_unsized_into

Parent 78dd95f4
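This commit rewrites several codegen entry points (coerce_unsized_into, size_and_align_of_dst, VirtualIndex::get_usize, the OperandValue store helpers, PlaceRef::project_field) from the concrete LLVM types (&Builder<'a, 'll, 'tcx>, &'ll Value) to generics over the BuilderMethods trait, so any backend that supplies an associated Value type can reuse them. A minimal runnable sketch of the pattern, using hypothetical stand-in names rather than the real rustc traits:

    use std::fmt::Debug;

    // Hypothetical stand-in for rustc's BuilderMethods trait: the key move
    // is the associated Value type replacing the concrete LLVM &'ll Value.
    trait BuilderMethods {
        type Value: Debug + PartialEq + Copy;
        fn load(&self, ptr: Self::Value) -> Self::Value;
    }

    // A toy backend whose values are plain integers.
    struct ToyBuilder;
    impl BuilderMethods for ToyBuilder {
        type Value = u64;
        fn load(&self, ptr: u64) -> u64 { ptr + 1 }
    }

    // Before the commit this shape of function was written against the LLVM
    // builder only; afterwards any type implementing the trait works.
    fn coerce_sketch<Bx: BuilderMethods>(bx: &Bx, src: Bx::Value) -> Bx::Value {
        bx.load(src)
    }

    fn main() {
        assert_eq!(coerce_sketch(&ToyBuilder, 41), 42);
    }

The diff below applies this same move repeatedly: a concrete &'ll Value parameter becomes Bx::Value, and &Builder<'a, 'll, 'tcx> becomes bx: &Bx with Bx: BuilderMethods<'a, 'tcx>.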
......@@ -35,13 +35,13 @@ pub fn codegen_inline_asm(
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() {
for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
inputs.push(place.load(bx).immediate());
inputs.push(bx.load_operand(place).immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
indirect_outputs.push(place.load(bx).immediate());
indirect_outputs.push(bx.load_operand(place).immediate());
} else {
output_types.push(place.layout.llvm_type(bx.cx()));
}
......
......@@ -65,7 +65,7 @@
use rustc_codegen_utils::symbol_names_test;
use time_graph;
use mono_item::{MonoItem, MonoItemExt};
use type_of::LayoutLlvmExt;
use rustc::util::nodemap::FxHashMap;
use CrateInfo;
use rustc_data_structures::small_c_str::SmallCStr;
......@@ -208,7 +208,7 @@ pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
.field(cx, abi::FAT_PTR_EXTRA);
cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()),
cx.backend_type(&vtable_ptr))
cx.backend_type(vtable_ptr))
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
......@@ -232,13 +232,13 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
(&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(bx.cx().type_is_sized(a));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b)));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
assert!(bx.cx().type_is_sized(a));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b)));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
......@@ -263,8 +263,8 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
}
let (lldata, llextra) = result.unwrap();
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
(bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(&dst_layout, 0, true)),
bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(&dst_layout, 1, true)))
(bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)))
}
_ => bug!("unsize_thin_ptr: called on bad types"),
}
......@@ -272,22 +272,22 @@ pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into(
bx: &Builder<'a, 'll, 'tcx>,
src: PlaceRef<'tcx, &'ll Value>,
dst: PlaceRef<'tcx, &'ll Value>
) {
pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
src: PlaceRef<'tcx, Bx::Value>,
dst: PlaceRef<'tcx, Bx::Value>
) {
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
let coerce_ptr = || {
let (base, info) = match src.load(bx).val {
let (base, info) = match bx.load_operand(src).val {
OperandValue::Pair(base, info) => {
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// the types match up.
let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR);
(bx.pointercast(base, thin_ptr.llvm_type(bx.cx())), info)
(bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
}
OperandValue::Immediate(base) => {
unsize_thin_ptr(bx, base, src_ty, dst_ty)
......
......@@ -13,15 +13,18 @@
use common::{self, *};
use context::CodegenCx;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{Align, Size, TyLayout};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::session::{config, Session};
use rustc_data_structures::small_c_str::SmallCStr;
use interfaces::*;
use syntax;
use base;
use mir::operand::{OperandValue, OperandRef};
use mir::place::PlaceRef;
use std::borrow::Cow;
use std::ops::Range;
use std::ptr;
......@@ -538,6 +541,73 @@ fn atomic_load(
}
}
fn load_operand(
&self,
place: PlaceRef<'tcx, &'ll Value>
) -> OperandRef<'tcx, &'ll Value> {
debug!("PlaceRef::load: {:?}", place);
assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
if place.layout.is_zst() {
return OperandRef::new_zst(self.cx(), place.layout);
}
let scalar_load_metadata = |load, scalar: &layout::Scalar| {
let vr = scalar.valid_range.clone();
match scalar.value {
layout::Int(..) => {
let range = scalar.valid_range_exclusive(self.cx());
if range.start != range.end {
self.range_metadata(load, range);
}
}
layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
self.nonnull_metadata(load);
}
_ => {}
}
};
let val = if let Some(llextra) = place.llextra {
OperandValue::Ref(place.llval, Some(llextra), place.align)
} else if place.layout.is_llvm_immediate() {
let mut const_llval = None;
unsafe {
if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
if llvm::LLVMIsGlobalConstant(global) == llvm::True {
const_llval = llvm::LLVMGetInitializer(global);
}
}
}
let llval = const_llval.unwrap_or_else(|| {
let load = self.load(place.llval, place.align);
if let layout::Abi::Scalar(ref scalar) = place.layout.abi {
scalar_load_metadata(load, scalar);
}
load
});
OperandValue::Immediate(base::to_immediate(self, llval, place.layout))
} else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
let load = |i, scalar: &layout::Scalar| {
let llptr = self.struct_gep(place.llval, i as u64);
let load = self.load(llptr, place.align);
scalar_load_metadata(load, scalar);
if scalar.is_bool() {
self.trunc(load, self.cx().type_i1())
} else {
load
}
};
OperandValue::Pair(load(0, a), load(1, b))
} else {
OperandValue::Ref(place.llval, None, place.align)
};
OperandRef { val, layout: place.layout }
}
fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
if self.sess().target.target.arch == "amdgpu" {
......
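Alongside the generalization, PlaceRef::load moves off the place type and onto the builder as load_operand (the body above is the old load logic, now living in the LLVM builder implementation). Call sites therefore migrate from place.load(bx) to bx.load_operand(place), as the later hunks show. A runnable toy sketch of the new call shape, with all names hypothetical stand-ins:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct PlaceRef(u32);

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct OperandRef(u32);

    struct Builder;

    impl Builder {
        // New-style entry point: the builder, not the place, performs the
        // load, so the method can be declared once on the backend trait.
        fn load_operand(&self, place: PlaceRef) -> OperandRef {
            OperandRef(place.0)
        }
    }

    fn main() {
        let bx = Builder;
        let place = PlaceRef(7);
        // before: place.load(&bx).immediate()
        // after:  bx.load_operand(place).immediate()
        let op = bx.load_operand(place);
        assert_eq!(op, OperandRef(7));
    }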
......@@ -14,23 +14,21 @@
use std;
use builder::Builder;
use common::*;
use meth;
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use rustc::ty::layout::LayoutOf;
use rustc::ty::{self, Ty};
use value::Value;
use interfaces::*;
pub fn size_and_align_of_dst(
bx: &Builder<'_, 'll, 'tcx>,
pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
t: Ty<'tcx>,
info: Option<&'ll Value>
) -> (&'ll Value, &'ll Value) {
info: Option<Bx::Value>
) -> (Bx::Value, Bx::Value) {
debug!("calculate size of DST: {}; with lost info: {:?}",
t, info);
if bx.cx().type_is_sized(t) {
let (size, align) = bx.cx().size_and_align_of(t);
let (size, align) = bx.cx().layout_of(t).size_and_align();
debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
t, info, size, align);
let size = bx.cx().const_usize(size.bytes());
......@@ -47,7 +45,7 @@ pub fn size_and_align_of_dst(
let unit = t.sequence_element_type(bx.tcx());
// The info in this case is the length of the str, so the size is that
// times the unit size.
let (size, align) = bx.cx().size_and_align_of(unit);
let (size, align) = bx.cx().layout_of(unit).size_and_align();
(bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())),
bx.cx().const_usize(align.abi()))
}
......
......@@ -10,12 +10,13 @@
use rustc::ty::layout::{HasTyCtxt, LayoutOf, TyLayout};
use rustc::ty::Ty;
use std::fmt::Debug;
use super::CodegenObject;
pub trait BackendTypes {
type Value: Debug + PartialEq + Copy;
type Value: CodegenObject;
type BasicBlock;
type Type: Debug + PartialEq + Copy;
type Type: CodegenObject;
type Context;
}
......
......@@ -12,6 +12,8 @@
use builder::MemFlags;
use common::*;
use libc::c_char;
use mir::operand::OperandRef;
use mir::place::PlaceRef;
use rustc::session::Session;
use rustc::ty::layout::{Align, Size};
......@@ -88,6 +90,7 @@ fn array_alloca(
fn load(&self, ptr: Self::Value, align: Align) -> Self::Value;
fn volatile_load(&self, ptr: Self::Value) -> Self::Value;
fn atomic_load(&self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
fn load_operand(&self, place: PlaceRef<'tcx, Self::Value>) -> OperandRef<'tcx, Self::Value>;
fn range_metadata(&self, load: Self::Value, range: Range<u128>);
fn nonnull_metadata(&self, load: Self::Value);
......
......@@ -26,6 +26,8 @@
pub use self::statics::StaticMethods;
pub use self::type_::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods};
use std::fmt;
pub trait CodegenMethods<'tcx>:
Backend<'tcx>
+ TypeMethods<'tcx>
......@@ -54,3 +56,6 @@ pub trait HasCodegen<'tcx>: Backend<'tcx> {
Context = Self::Context,
>;
}
pub trait CodegenObject: Copy + PartialEq + fmt::Debug {}
impl<T: Copy + PartialEq + fmt::Debug> CodegenObject for T {}
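The new CodegenObject trait bundles the bounds that backend value and type handles must satisfy, and the blanket impl above means any Copy + PartialEq + Debug type (including the LLVM backend's &'ll Value) satisfies it without an explicit impl. A self-contained sketch of that blanket-impl pattern:

    use std::fmt;

    pub trait CodegenObject: Copy + PartialEq + fmt::Debug {}
    impl<T: Copy + PartialEq + fmt::Debug> CodegenObject for T {}

    // Any such type can now be used where a bound like
    // BackendTypes::Value: CodegenObject is required.
    fn takes_codegen_object<V: CodegenObject>(v: V) -> V {
        println!("{:?}", v);
        v
    }

    fn main() {
        // i32 is Copy + PartialEq + Debug, so it gets the impl for free.
        let x = takes_codegen_object(42i32);
        assert_eq!(x, 42);
    }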
......@@ -69,12 +69,14 @@ pub trait DerivedTypeMethods<'tcx>: Backend<'tcx> {
}
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
fn backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type;
fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
fn scalar_pair_element_backend_type<'a>(
&self,
ty: &TyLayout<'tcx>,
layout: TyLayout<'tcx>,
index: usize,
immediate: bool
immediate: bool,
) -> Self::Type;
}
......
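Note that LayoutTypeMethods also drops the reference: TyLayout is a small Copy struct in rustc, so passing it by value costs the same as passing &TyLayout and removes a borrow at every call site (hence the matching &vtable_ptr to vtable_ptr changes earlier in the diff). A minimal sketch of the by-value signature, with Layout as a hypothetical stand-in:

    // Hypothetical stand-in for rustc's TyLayout.
    #[derive(Clone, Copy, Debug)]
    struct Layout { size: u64, align: u64 }

    trait LayoutTypeMethods {
        type Type;
        // before: fn backend_type(&self, ty: &Layout) -> Self::Type;
        // after:  fn backend_type(&self, layout: Layout) -> Self::Type;
        fn backend_type(&self, layout: Layout) -> Self::Type;
    }

    struct ToyCx;
    impl LayoutTypeMethods for ToyCx {
        type Type = String;
        fn backend_type(&self, layout: Layout) -> String {
            format!("i{} (align {})", layout.size * 8, layout.align)
        }
    }

    fn main() {
        let cx = ToyCx;
        // The layout is copied into the call; no borrow is needed.
        println!("{}", cx.backend_type(Layout { size: 4, align: 4 }));
    }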
......@@ -154,7 +154,7 @@ fn codegen_intrinsic_call(
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (llsize, _) =
glue::size_and_align_of_dst(&self, tp_ty, Some(meta));
glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llsize
} else {
cx.const_usize(cx.size_of(tp_ty).bytes())
......@@ -168,7 +168,7 @@ fn codegen_intrinsic_call(
let tp_ty = substs.type_at(0);
if let OperandValue::Pair(_, meta) = args[0].val {
let (_, llalign) =
glue::size_and_align_of_dst(&self, tp_ty, Some(meta));
glue::size_and_align_of_dst(self, tp_ty, Some(meta));
llalign
} else {
cx.const_usize(cx.align_of(tp_ty).abi())
......@@ -353,9 +353,9 @@ fn codegen_intrinsic_call(
cx.type_bool()
);
let dest = result.project_field(&self, 0);
let dest = result.project_field(self, 0);
self.store(val, dest.llval, dest.align);
let dest = result.project_field(&self, 1);
let dest = result.project_field(self, 1);
self.store(overflow, dest.llval, dest.align);
return;
......@@ -520,9 +520,9 @@ fn codegen_intrinsic_call(
cx.type_bool()
);
let dest = result.project_field(&self, 0);
let dest = result.project_field(self, 0);
self.store(val, dest.llval, dest.align);
let dest = result.project_field(&self, 1);
let dest = result.project_field(self, 1);
self.store(success, dest.llval, dest.align);
return;
} else {
......@@ -678,7 +678,7 @@ fn modify_as_needed<'ll, 'tcx>(
};
let arg = PlaceRef::new_sized(ptr, arg.layout, align);
(0..contents.len()).map(|i| {
arg.project_field(bx, i).load(bx).immediate()
bx.load_operand(arg.project_field(bx, i)).immediate()
}).collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
......@@ -729,7 +729,7 @@ fn modify_as_needed<'ll, 'tcx>(
assert!(!flatten);
for i in 0..elems.len() {
let dest = result.project_field(&self, i);
let dest = result.project_field(self, i);
let val = self.extract_value(val, i as u64);
self.store(val, dest.llval, dest.align);
}
......@@ -746,7 +746,7 @@ fn modify_as_needed<'ll, 'tcx>(
self.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(&self, llval, result.layout)
.val.store(&self, result);
.val.store(self, result);
}
}
}
......
......@@ -26,7 +26,7 @@
pub const SIZE: VirtualIndex = VirtualIndex(1);
pub const ALIGN: VirtualIndex = VirtualIndex(2);
impl<'a, 'tcx> VirtualIndex {
impl<'a, 'tcx: 'a> VirtualIndex {
pub fn from_index(index: usize) -> Self {
VirtualIndex(index as u64 + 3)
}
......@@ -52,11 +52,11 @@ pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>,
ptr
}
pub fn get_usize(
pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
llvtable: &'ll Value
) -> &'ll Value {
bx: &Bx,
llvtable: Bx::Value
) -> Bx::Value {
// Load the data pointer from the object.
debug!("get_int({:?}, {:?})", llvtable, self);
......
......@@ -165,8 +165,8 @@ fn codegen_terminator(&mut self,
bx.cleanup_ret(cleanup_pad, None);
} else {
let slot = self.get_personality_slot(&bx);
let lp0 = slot.project_field(&bx, 0).load(&bx).immediate();
let lp1 = slot.project_field(&bx, 1).load(&bx).immediate();
let lp0 = bx.load_operand(slot.project_field(&bx, 0)).immediate();
let lp1 = bx.load_operand(slot.project_field(&bx, 1)).immediate();
slot.storage_dead(&bx);
if !bx.sess().target.target.options.custom_unwind_resume {
......@@ -835,7 +835,7 @@ fn codegen_arguments_untupled(&mut self,
let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
for i in 0..tuple.layout.fields.count() {
let field_ptr = tuple_ptr.project_field(bx, i);
self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]);
self.codegen_argument(bx, bx.load_operand(field_ptr), llargs, &args[i]);
}
} else if let Ref(_, Some(_), _) = tuple.val {
bug!("closure arguments must be sized")
......@@ -994,7 +994,7 @@ fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>,
let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
place.storage_live(bx);
self.codegen_transmute_into(bx, src, place);
let op = place.load(bx);
let op = bx.load_operand(place);
place.storage_dead(bx);
self.locals[index] = LocalRef::Operand(Some(op));
}
......@@ -1032,7 +1032,7 @@ fn store_return(&mut self,
Nothing => (),
Store(dst) => ret_ty.store(bx, llval, dst),
IndirectOperand(tmp, index) => {
let op = tmp.load(bx);
let op = bx.load_operand(tmp);
tmp.storage_dead(bx);
self.locals[index] = LocalRef::Operand(Some(op));
}
......@@ -1042,7 +1042,7 @@ fn store_return(&mut self,
let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
tmp.storage_live(bx);
ret_ty.store(bx, llval, tmp);
let op = tmp.load(bx);
let op = bx.load_operand(tmp);
tmp.storage_dead(bx);
op
} else {
......
......@@ -567,7 +567,7 @@ fn arg_local_refs(
let indirect_operand = OperandValue::Pair(llarg, llextra);
let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name);
indirect_operand.store(&bx, tmp);
indirect_operand.store(bx, tmp);
tmp
} else {
let tmp = PlaceRef::alloca(bx, arg.layout, &name);
......
......@@ -20,7 +20,7 @@
use type_of::LayoutLlvmExt;
use glue;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, IntrinsicDeclarationMethods};
use interfaces::*;
use std::fmt;
......@@ -67,16 +67,20 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
}
}
impl OperandRef<'tcx, &'ll Value> {
pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>,
layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> {
impl<'tcx, V: CodegenObject> OperandRef<'tcx, V> {
pub fn new_zst<Cx: CodegenMethods<'tcx, Value = V>>(
cx: &Cx,
layout: TyLayout<'tcx>
) -> OperandRef<'tcx, V> {
assert!(layout.is_zst());
OperandRef {
val: OperandValue::Immediate(cx.const_undef(layout.immediate_llvm_type(cx))),
val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(layout))),
layout
}
}
}
impl OperandRef<'tcx, &'ll Value> {
pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
val: &'tcx ty::Const<'tcx>)
-> Result<OperandRef<'tcx, &'ll Value>, ErrorHandled> {
......@@ -122,7 +126,7 @@ pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
OperandValue::Pair(a_llval, b_llval)
},
ConstValue::ByRef(_, alloc, offset) => {
return Ok(PlaceRef::from_const_alloc(bx, layout, alloc, offset).load(bx));
return Ok(bx.load_operand(PlaceRef::from_const_alloc(bx, layout, alloc, offset)));
},
};
......@@ -256,10 +260,17 @@ pub fn extract_field(
}
}
impl OperandValue<&'ll Value> {
pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'tcx, &'ll Value>) {
impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
dest: PlaceRef<'tcx, Bx::Value>
) {
self.store_with_flags(bx, dest, MemFlags::empty());
}
}
impl OperandValue<&'ll Value> {
pub fn volatile_store(
self,
......@@ -286,11 +297,13 @@ pub fn nontemporal_store(
) {
self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
}
}
fn store_with_flags(
impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx, &'ll Value>,
dest: PlaceRef<'tcx, &'ll Value>,
bx: &Bx,
dest: PlaceRef<'tcx, Bx::Value>,
flags: MemFlags,
) {
debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
......@@ -427,7 +440,7 @@ pub fn codegen_consume(&mut self,
// for most places, to consume them we just load them
// out from their home
self.codegen_place(bx, place).load(bx)
bx.load_operand(self.codegen_place(bx, place))
}
pub fn codegen_operand(&mut self,
......@@ -461,11 +474,11 @@ pub fn codegen_operand(&mut self,
bx.call(fnname, &[], None);
// We've errored, so we don't have to produce working code.
let layout = bx.cx().layout_of(ty);
PlaceRef::new_sized(
bx.load_operand(PlaceRef::new_sized(
bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))),
layout,
layout.align,
).load(bx)
))
})
}
}
......
......@@ -8,12 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, LLVMConstInBoundsGEP};
use llvm::LLVMConstInBoundsGEP;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::{Builder, MemFlags};
use common::{CodegenCx, IntPredicate};
use type_of::LayoutLlvmExt;
......@@ -24,7 +23,7 @@
use interfaces::*;
use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
use super::operand::OperandValue;
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
......@@ -108,75 +107,14 @@ pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
}
}
pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value> {
debug!("PlaceRef::load: {:?}", self);
assert_eq!(self.llextra.is_some(), self.layout.is_unsized());
if self.layout.is_zst() {
return OperandRef::new_zst(bx.cx(), self.layout);
}
let scalar_load_metadata = |load, scalar: &layout::Scalar| {
let vr = scalar.valid_range.clone();
match scalar.value {
layout::Int(..) => {
let range = scalar.valid_range_exclusive(bx.cx());
if range.start != range.end {
bx.range_metadata(load, range);
}
}
layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
bx.nonnull_metadata(load);
}
_ => {}
}
};
let val = if let Some(llextra) = self.llextra {
OperandValue::Ref(self.llval, Some(llextra), self.align)
} else if self.layout.is_llvm_immediate() {
let mut const_llval = None;
unsafe {
if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
if llvm::LLVMIsGlobalConstant(global) == llvm::True {
const_llval = llvm::LLVMGetInitializer(global);
}
}
}
let llval = const_llval.unwrap_or_else(|| {
let load = bx.load(self.llval, self.align);
if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
scalar_load_metadata(load, scalar);
}
load
});
OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
} else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
let load = |i, scalar: &layout::Scalar| {
let llptr = bx.struct_gep(self.llval, i as u64);
let load = bx.load(llptr, self.align);
scalar_load_metadata(load, scalar);
if scalar.is_bool() {
bx.trunc(load, bx.cx().type_i1())
} else {
load
}
};
OperandValue::Pair(load(0, a), load(1, b))
} else {
OperandValue::Ref(self.llval, None, self.align)
};
OperandRef { val, layout: self.layout }
}
}
impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
/// Access a field, at a point when the value's case is known.
pub fn project_field(
self,
bx: &Builder<'a, 'll, 'tcx>,
pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self, bx: &Bx,
ix: usize,
) -> PlaceRef<'tcx, &'ll Value> {
) -> PlaceRef<'tcx, Bx::Value> {
let cx = bx.cx();
let field = self.layout.field(cx, ix);
let offset = self.layout.fields.offset(ix);
......@@ -195,7 +133,7 @@ pub fn project_field(
};
PlaceRef {
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
llval: bx.pointercast(llval, cx.type_ptr_to(field.llvm_type(cx))),
llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(field))),
llextra: if cx.type_has_metadata(field.ty) {
self.llextra
} else {
......@@ -268,7 +206,7 @@ pub fn project_field(
let byte_ptr = bx.gep(byte_ptr, &[offset]);
// Finally, cast back to the type expected
let ll_fty = field.llvm_type(cx);
let ll_fty = cx.backend_type(field);
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
PlaceRef {
......@@ -278,6 +216,9 @@ pub fn project_field(
align: effective_field_align,
}
}
}
impl PlaceRef<'tcx, &'ll Value> {
/// Obtain the actual discriminant of a value.
pub fn codegen_get_discr(
......@@ -301,7 +242,7 @@ pub fn codegen_get_discr(
}
let discr = self.project_field(bx, 0);
let lldiscr = discr.load(bx).immediate();
let lldiscr = bx.load_operand(discr).immediate();
match self.layout.variants {
layout::Variants::Single { .. } => bug!(),
layout::Variants::Tagged { ref tag, .. } => {
......@@ -449,7 +390,7 @@ pub fn codegen_place(&mut self,
return place;
}
LocalRef::UnsizedPlace(place) => {
return place.load(bx).deref(&cx);
return bx.load_operand(place).deref(&cx);
}
LocalRef::Operand(..) => {
bug!("using operand local {:?} as place", place);
......
......@@ -394,15 +394,21 @@ fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
}
impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type {
ty.llvm_type(&self)
fn backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type {
layout.llvm_type(&self)
}
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type {
layout.immediate_llvm_type(self)
}
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool {
layout.is_llvm_immediate()
}
fn scalar_pair_element_backend_type<'a>(
&self,
ty: &TyLayout<'tcx>,
layout: TyLayout<'tcx>,
index: usize,
immediate: bool
) -> &'ll Type {
ty.scalar_pair_element_llvm_type(&self, index, immediate)
layout.scalar_pair_element_llvm_type(self, index, immediate)
}
}