Commit 6a993fe3 authored by Denis Merigoux, committed by Eduard-Mihai Burtescu

Generalized mir::codegen_mir (and all subsequent functions)

Parent: cbe31a42
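
This commit replaces direct uses of the LLVM-specific Builder<'a, 'll, 'tcx> in MIR codegen with a type parameter bounded by the new interfaces traits; the concrete backend is chosen at the call site (see mir::codegen_mir::<Builder>(cx, lldecl, &mir, instance, sig) in base.rs below). A minimal, self-contained model of the pattern, illustrative only and not rustc code:

    // A trait with an associated backend type stands in for BuilderMethods.
    trait BuilderMethods {
        type Value;
        fn const_i32(v: i32) -> Self::Value;
    }

    // The LLVM builder is just one implementor; here i32 stands in for &'ll Value.
    struct LlvmBuilder;
    impl BuilderMethods for LlvmBuilder {
        type Value = i32;
        fn const_i32(v: i32) -> i32 { v }
    }

    // Backend-agnostic "codegen", written once and monomorphized per backend.
    fn codegen_mir<Bx: BuilderMethods>() -> Bx::Value {
        Bx::const_i32(42)
    }

    fn main() {
        // Mirrors the turbofish at the real call site below.
        let v = codegen_mir::<LlvmBuilder>();
        assert_eq!(v, 42);
    }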
......@@ -16,11 +16,12 @@
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;
use rustc_target::abi::call::ArgType;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods};
use interfaces::*;
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty};
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout;
use libc::c_uint;
......@@ -280,6 +281,27 @@ fn store_fn_arg(
}
}
impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn store_fn_arg(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
) {
ty.store_fn_arg(self, idx, dst)
}
fn store_arg_ty(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>
) {
ty.store(self, val, dst)
}
fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.memory_ty(self.cx())
}
}
pub trait FnTypeExt<'tcx> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
fn new(cx: &CodegenCx<'ll, 'tcx>,
......@@ -790,3 +812,29 @@ fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value
}
}
}
impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> {
FnType::new(&self, sig, extra_args)
}
fn new_vtable(
&self,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]
) -> FnType<'tcx, Ty<'tcx>> {
FnType::new_vtable(&self, sig, extra_args)
}
fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> {
FnType::of_instance(&self, instance)
}
}
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn apply_attrs_callsite(
&self,
ty: &FnType<'tcx, Ty<'tcx>>,
callsite: Self::Value
) {
ty.apply_attrs_callsite(self, callsite)
}
}
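// Note: the split between the two impls above mirrors where the information
// lives. Constructing a FnType needs only the codegen context (AbiMethods on
// CodegenCx), while attaching call-site attributes needs an emitted call
// instruction, and therefore a builder (AbiBuilderMethods on Builder).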
......@@ -15,7 +15,7 @@
use value::Value;
use rustc::hir;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods};
use interfaces::*;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
......@@ -23,106 +23,110 @@
use std::ffi::CString;
use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM
pub fn codegen_inline_asm(
bx: &Builder<'a, 'll, 'tcx>,
ia: &hir::InlineAsm,
outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
mut inputs: Vec<&'ll Value>
) -> bool {
let mut ext_constraints = vec![];
let mut output_types = vec![];
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
inputs.push(bx.load_operand(place).immediate());
ext_constraints.push(i.to_string());
impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
// Take an inline assembly expression and splat it out via LLVM
fn codegen_inline_asm(
&self,
ia: &hir::InlineAsm,
outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
mut inputs: Vec<&'ll Value>
) -> bool {
let mut ext_constraints = vec![];
let mut output_types = vec![];
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
inputs.push(self.load_operand(place).immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
indirect_outputs.push(self.load_operand(place).immediate());
} else {
output_types.push(place.layout.llvm_type(self.cx()));
}
}
if out.is_indirect {
indirect_outputs.push(bx.load_operand(place).immediate());
} else {
output_types.push(place.layout.llvm_type(bx.cx()));
if !indirect_outputs.is_empty() {
indirect_outputs.extend_from_slice(&inputs);
inputs = indirect_outputs;
}
}
if !indirect_outputs.is_empty() {
indirect_outputs.extend_from_slice(&inputs);
inputs = indirect_outputs;
}
let clobbers = ia.clobbers.iter()
.map(|s| format!("~{{{}}}", &s));
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &bx.sess().target.target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
"mips" | "mips64" => vec!["~{$1}"],
_ => Vec::new()
};
let all_constraints =
ia.outputs.iter().map(|out| out.constraint.to_string())
.chain(ia.inputs.iter().map(|s| s.to_string()))
.chain(ext_constraints)
.chain(clobbers)
.chain(arch_clobbers.iter().map(|s| s.to_string()))
.collect::<Vec<String>>().join(",");
debug!("Asm Constraints: {}", &all_constraints);
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
0 => bx.cx().type_void(),
1 => output_types[0],
_ => bx.cx().type_struct(&output_types, false)
};
let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
let constraint_cstr = CString::new(all_constraints).unwrap();
let r = bx.inline_asm_call(
asm.as_ptr(),
constraint_cstr.as_ptr(),
&inputs,
output_type,
ia.volatile,
ia.alignstack,
ia.dialect
);
if r.is_none() {
return false;
}
let r = r.unwrap();
let clobbers = ia.clobbers.iter()
.map(|s| format!("~{{{}}}", &s));
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &self.cx().sess().target.target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
"mips" | "mips64" => vec!["~{$1}"],
_ => Vec::new()
};
let all_constraints =
ia.outputs.iter().map(|out| out.constraint.to_string())
.chain(ia.inputs.iter().map(|s| s.to_string()))
.chain(ext_constraints)
.chain(clobbers)
.chain(arch_clobbers.iter().map(|s| s.to_string()))
.collect::<Vec<String>>().join(",");
debug!("Asm Constraints: {}", &all_constraints);
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
0 => self.cx().type_void(),
1 => output_types[0],
_ => self.cx().type_struct(&output_types, false)
};
let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
let constraint_cstr = CString::new(all_constraints).unwrap();
let r = self.inline_asm_call(
asm.as_ptr(),
constraint_cstr.as_ptr(),
&inputs,
output_type,
ia.volatile,
ia.alignstack,
ia.dialect
);
if r.is_none() {
return false;
}
let r = r.unwrap();
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &place)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) };
OperandValue::Immediate(v).store(bx, place);
}
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &place)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
OperandValue::Immediate(v).store(self, place);
}
// Store mark in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);
// Store mark in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: &'ll Value = bx.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));
}
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1));
}
return true;
true
}
}
pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ga: &hir::GlobalAsm) {
let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
unsafe {
llvm::LLVMRustAppendModuleInlineAsm(cx.llmod, asm.as_ptr());
impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn codegen_global_asm(&self, ga: &hir::GlobalAsm) {
let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
unsafe {
llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr());
}
}
}
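// Illustrative effect (hypothetical input): for global_asm!(".globl foo"),
// this appends the string verbatim to the LLVM module's module-level inline
// assembly, outside of any function body.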
......@@ -21,6 +21,7 @@
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::fx::FxHashMap;
use rustc_target::spec::PanicStrategy;
use interfaces::*;
use attributes;
use llvm::{self, Attribute};
......
......@@ -57,7 +57,6 @@
use common::{self, IntPredicate, RealPredicate, TypeKind};
use context::CodegenCx;
use debuginfo;
use declare;
use meth;
use mir;
use monomorphize::Instance;
......@@ -392,15 +391,18 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
sess.target.target.options.is_like_msvc
}
pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) {
pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) {
let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
bx.call(assume_intrinsic, &[val], None);
}
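// Illustrative use, assuming an i1 value `cond` in scope: call_assume(&bx, cond)
// emits a call to the llvm.assume intrinsic, letting LLVM optimize on the
// premise that `cond` is true.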
pub fn from_immediate<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
bx: &Builder,
val: Builder::Value
) -> Builder::Value {
pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) -> Bx::Value {
if bx.cx().val_ty(val) == bx.cx().type_i1() {
bx.zext(val, bx.cx().type_i8())
} else {
......@@ -447,7 +449,7 @@ pub fn memcpy_ty<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
pub fn codegen_instance(cx: &CodegenCx<'_, 'tcx>, instance: Instance<'tcx>) {
let _s = if cx.sess().codegen_stats() {
let mut instance_name = String::new();
DefPathBasedNames::new(cx.tcx, true, true)
......@@ -471,7 +473,7 @@ pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'
cx.stats.borrow_mut().n_closures += 1;
let mir = cx.tcx.instance_mir(instance.def);
mir::codegen_mir(cx, lldecl, &mir, instance, sig);
mir::codegen_mir::<Builder>(cx, lldecl, &mir, instance, sig);
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
......@@ -532,7 +534,7 @@ fn create_entry_fn(
&main_ret_ty.no_bound_vars().unwrap(),
);
if declare::get_defined_value(cx, "main").is_some() {
if cx.get_defined_value("main").is_some() {
// FIXME: We should be smart and show a better diagnostic here.
cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
.help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
......@@ -540,7 +542,7 @@ fn create_entry_fn(
cx.sess().abort_if_errors();
bug!();
}
let llfn = declare::declare_cfn(cx, "main", llfty);
let llfn = cx.declare_cfn("main", llfty);
// `main` should respect same config for frame pointer elimination as rest of code
attributes::set_frame_pointer_elimination(cx, llfn);
......
......@@ -18,7 +18,7 @@
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::session::{config, Session};
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use interfaces::*;
use syntax;
......@@ -59,11 +59,13 @@ pub struct MemFlags: u8 {
}
}
impl BackendTypes for Builder<'_, 'll, '_> {
type Value = &'ll Value;
type BasicBlock = &'ll BasicBlock;
type Type = &'ll Type;
type Context = &'ll llvm::Context;
impl BackendTypes for Builder<'_, 'll, 'tcx> {
type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
type Context = <CodegenCx<'ll, 'tcx> as BackendTypes>::Context;
type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
}
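// Projecting the associated types from CodegenCx (rather than restating
// &'ll Value and friends) keeps the builder and its codegen context in
// lockstep; the HasCodegen trait later in this commit relies on exactly this
// through its BackendTypes<Value = Self::Value, ...> equality bounds.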
impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
......@@ -126,10 +128,6 @@ fn build_sibling_block<'b>(&self, name: &'b str) -> Self {
Builder::new_block(self.cx, self.llfn(), name)
}
fn sess(&self) -> &Session {
self.cx.sess()
}
fn llfn(&self) -> &'ll Value {
unsafe {
llvm::LLVMGetBasicBlockParent(self.llbb())
......@@ -223,7 +221,7 @@ fn invoke(&self,
args: &[&'ll Value],
then: &'ll BasicBlock,
catch: &'ll BasicBlock,
bundle: Option<&common::OperandBundleDef<&'ll Value>>) -> &'ll Value {
funclet: Option<&common::Funclet<&'ll Value>>) -> &'ll Value {
self.count_insn("invoke");
debug!("Invoke {:?} with args ({:?})",
......@@ -231,6 +229,7 @@ fn invoke(&self,
args);
let args = self.check_call("invoke", llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.map(OperandBundleDef::from_generic);
let bundle = bundle.as_ref().map(|b| &*b.raw);
......@@ -610,7 +609,7 @@ fn load_operand(
fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
if self.sess().target.target.arch == "amdgpu" {
if self.cx().sess().target.target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks a i64 value is
// split into a v2i32, halving the bitwidth LLVM expects,
// tripping an assertion. So, for now, just disable this
......@@ -920,7 +919,7 @@ fn memset(
align: Align,
flags: MemFlags,
) {
let ptr_width = &self.sess().target.target.target_pointer_width;
let ptr_width = &self.cx().sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.cx().type_i8p());
......@@ -1362,7 +1361,7 @@ fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size)
}
fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
bundle: Option<&common::OperandBundleDef<&'ll Value>>) -> &'ll Value {
funclet: Option<&common::Funclet<&'ll Value>>) -> &'ll Value {
self.count_insn("call");
debug!("Call {:?} with args ({:?})",
......@@ -1370,6 +1369,7 @@ fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
args);
let args = self.check_call("call", llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.map(OperandBundleDef::from_generic);
let bundle = bundle.as_ref().map(|b| &*b.raw);
......@@ -1399,7 +1399,17 @@ fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value {
}
}
fn cx(&self) -> &'a CodegenCx<'ll, 'tcx> {
fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
self.cx
}
fn delete_basic_block(&self, bb: &'ll BasicBlock) {
unsafe {
llvm::LLVMDeleteBasicBlock(bb);
}
}
fn do_not_inline(&self, llret: &'ll Value) {
llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
}
}
......@@ -15,18 +15,15 @@
//! closure.
use attributes;
use common::{CodegenCx};
use consts;
use declare;
use llvm;
use monomorphize::Instance;
use type_of::LayoutLlvmExt;
use context::CodegenCx;
use value::Value;
use interfaces::*;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, TypeFoldable};
use rustc::ty::layout::LayoutOf;
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use rustc::ty::subst::Substs;
/// Codegens a reference to a fn/method item, monomorphizing and
......@@ -40,7 +37,7 @@ pub fn get_fn(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll Value {
let tcx = cx.tcx;
let tcx = cx.tcx();
debug!("get_fn(instance={:?})", instance);
......@@ -48,8 +45,8 @@ pub fn get_fn(
assert!(!instance.substs.has_escaping_bound_vars());
assert!(!instance.substs.has_param_types());
let sig = instance.fn_sig(cx.tcx);
if let Some(&llfn) = cx.instances.borrow().get(&instance) {
let sig = instance.fn_sig(cx.tcx());
if let Some(&llfn) = cx.instances().borrow().get(&instance) {
return llfn;
}
......@@ -58,9 +55,9 @@ pub fn get_fn(
// Create a fn pointer with the substituted signature.
let fn_ptr_ty = tcx.mk_fn_ptr(sig);
let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx);
let llptrty = cx.backend_type(cx.layout_of(fn_ptr_ty));
let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) {
let llfn = if let Some(llfn) = cx.get_declared_value(&sym) {
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
......@@ -86,13 +83,13 @@ pub fn get_fn(
// other weird situations. Annoying.
if cx.val_ty(llfn) != llptrty {
debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
consts::ptrcast(llfn, llptrty)
cx.static_ptrcast(llfn, llptrty)
} else {
debug!("get_fn: not casting pointer!");
llfn
}
} else {
let llfn = declare::declare_fn(cx, &sym, sig);
let llfn = cx.declare_fn(&sym, sig);
assert_eq!(cx.val_ty(llfn), llptrty);
debug!("get_fn: not casting pointer!");
......
......@@ -17,21 +17,23 @@
use rustc::middle::lang_items::LangItem;
use abi;
use base;
use builder::Builder;
use consts;
use declare;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use interfaces::{BackendTypes, BuilderMethods, ConstMethods, BaseTypeMethods};
use interfaces::*;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size};
use rustc::mir::interpret::{Scalar, AllocType, Allocation};
use rustc::hir;
use mir::constant::const_alloc_to_llvm;
use mir::place::PlaceRef;
use libc::{c_uint, c_char};
use syntax::symbol::LocalInternedString;
use syntax::ast::Mutability;
use syntax_pos::{Span, DUMMY_SP};
pub use context::CodegenCx;
......@@ -48,13 +50,13 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bo
ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP)
}
pub struct OperandBundleDef<'a, Value> {
pub struct OperandBundleDef<'a, V> {
pub name: &'a str,
pub val: Value
pub val: V
}
impl<'a, Value> OperandBundleDef<'a, Value> {
pub fn new(name: &'a str, val: Value) -> Self {
impl<'a, V> OperandBundleDef<'a, V> {
pub fn new(name: &'a str, val: V) -> Self {
OperandBundleDef {
name,
val
......@@ -190,24 +192,24 @@ pub enum TypeKind {
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
pub struct Funclet<'ll> {
cleanuppad: &'ll Value,
operand: OperandBundleDef<'ll, &'ll Value>,
pub struct Funclet<'a, V> {
cleanuppad: V,
operand: OperandBundleDef<'a, V>,
}
impl Funclet<'ll> {
pub fn new(cleanuppad: &'ll Value) -> Self {
impl<'a, V: CodegenObject> Funclet<'a, V> {
pub fn new(cleanuppad: V) -> Self {
Funclet {
cleanuppad,
operand: OperandBundleDef::new("funclet", cleanuppad),
}
}
pub fn cleanuppad(&self) -> &'ll Value {
pub fn cleanuppad(&self) -> V {
self.cleanuppad
}
pub fn bundle(&self) -> &OperandBundleDef<'ll, &'ll Value> {
pub fn bundle(&self) -> &OperandBundleDef<'a, V> {
&self.operand
}
}
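// Illustrative use, assuming a builder `bx` and an MSVC-style cleanup pad `cp`:
//
//     let funclet = Funclet::new(cp);
//     bx.call(llfn, &args, Some(&funclet));
//
// Inside call/invoke the funclet is turned back into an LLVM operand bundle
// via funclet.map(|f| f.bundle()), as in the builder.rs changes above.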
......@@ -217,6 +219,8 @@ impl BackendTypes for CodegenCx<'ll, 'tcx> {
type BasicBlock = &'ll BasicBlock;
type Type = &'ll Type;
type Context = &'ll llvm::Context;
type DIScope = &'ll llvm::debuginfo::DIScope;
}
impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
......@@ -300,7 +304,7 @@ fn const_cstr(
s.len() as c_uint,
!null_terminated as Bool);
let sym = self.generate_local_symbol_name("str");
let g = declare::define_global(&self, &sym[..], self.val_ty(sc)).unwrap_or_else(||{
let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", sym);
});
llvm::LLVMSetInitializer(g, sc);
......@@ -415,6 +419,79 @@ fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
}
}
}
fn scalar_to_backend(
&self,
cv: Scalar,
layout: &layout::Scalar,
llty: &'ll Type,
) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv {
Scalar::Bits { size: 0, .. } => {
assert_eq!(0, layout.value.size(self).bytes());
self.const_undef(self.type_ix(0))
},
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(self).bytes());
let llval = self.const_uint_big(self.type_ix(bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
self.static_bitcast(llval, llty)
}
},
Scalar::Ptr(ptr) => {
let alloc_type = self.tcx.alloc_map.lock().get(ptr.alloc_id);
let base_addr = match alloc_type {
Some(AllocType::Memory(alloc)) => {
let init = const_alloc_to_llvm(self, alloc);
if alloc.mutability == Mutability::Mutable {
self.static_addr_of_mut(init, alloc.align, None)
} else {
self.static_addr_of(init, alloc.align, None)
}
}
Some(AllocType::Function(fn_instance)) => {
self.get_fn(fn_instance)
}
Some(AllocType::Static(def_id)) => {
assert!(self.tcx.is_static(def_id).is_some());
self.get_static(def_id)
}
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
self.static_bitcast(base_addr, self.type_i8p()),
&self.const_usize(ptr.offset.bytes()),
1,
) };
if layout.value != layout::Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
self.static_bitcast(llval, llty)
}
}
}
}
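// Worked example (illustrative): a `true` constant arrives here as
// Scalar::Bits { bits: 1, size: 1 }; with a boolean scalar layout, the match
// above produces the 1-bit LLVM constant `i1 1` for an i1 `llty`.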
fn from_const_alloc(
&self,
layout: TyLayout<'tcx>,
alloc: &Allocation,
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
let init = const_alloc_to_llvm(self, alloc);
let base_addr = self.static_addr_of(init, layout.align, None);
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
self.static_bitcast(base_addr, self.type_i8p()),
&self.const_usize(offset.bytes()),
1,
)};
let llval = self.static_bitcast(llval, self.type_ptr_to(layout.llvm_type(self)));
PlaceRef::new_sized(llval, layout, alloc.align)
}
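// from_const_alloc shares the lowering used for Scalar::Ptr above: take the
// allocation's address as an i8*, offset it with an inbounds GEP, and bitcast
// the result to a pointer to `layout`'s LLVM type, yielding a PlaceRef that
// MIR codegen can load from.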
}
pub fn val_ty(v: &'ll Value) -> &'ll Type {
......@@ -466,20 +543,23 @@ pub fn langcall(tcx: TyCtxt,
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)
pub fn build_unchecked_lshift(
bx: &Builder<'a, 'll, 'tcx>,
lhs: &'ll Value,
rhs: &'ll Value
) -> &'ll Value {
pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
bx.shl(lhs, rhs)
}
pub fn build_unchecked_rshift(
bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value
) -> &'ll Value {
pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs_t: Ty<'tcx>,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
......@@ -491,26 +571,29 @@ pub fn build_unchecked_rshift(
}
}
fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value {
fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
rhs: Bx::Value
) -> Bx::Value {
let rhs_llty = bx.cx().val_ty(rhs);
bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
}
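// Worked example (illustrative): for a 32-bit shift, shift_mask_val yields
// 31 (0b11111), so `x << n` is emitted as `x << (n & 31)`. Masking keeps the
// dynamic shift amount below the bit width, avoiding LLVM's undefined result
// for oversized shifts while preserving the Java-like semantics noted above.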
pub fn shift_mask_val(
bx: &Builder<'a, 'll, 'tcx>,
llty: &'ll Type,
mask_llty: &'ll Type,
pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
llty: Bx::Type,
mask_llty: Bx::Type,
invert: bool
) -> &'ll Value {
) -> Bx::Value {
let kind = bx.cx().type_kind(llty);
match kind {
TypeKind::Integer => {
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
let val = bx.cx().int_width(llty) - 1;
if invert {
bx.cx.const_int(mask_llty, !val as i64)
bx.cx().const_int(mask_llty, !val as i64)
} else {
bx.cx.const_uint(mask_llty, val)
bx.cx().const_uint(mask_llty, val)
}
},
TypeKind::Vector => {
......
......@@ -16,7 +16,6 @@
use base;
use monomorphize::MonoItem;
use common::CodegenCx;
use declare;
use monomorphize::Instance;
use syntax_pos::Span;
use syntax_pos::symbol::LocalInternedString;
......@@ -24,7 +23,7 @@
use type_of::LayoutLlvmExt;
use value::Value;
use rustc::ty::{self, Ty};
use interfaces::{BaseTypeMethods, DerivedTypeMethods, StaticMethods};
use interfaces::*;
use rustc::ty::layout::{Align, LayoutOf};
......@@ -79,7 +78,7 @@ fn check_and_apply_linkage(
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
let g1 = declare::declare_global(cx, &sym, llty2);
let g1 = cx.declare_global(&sym, llty2);
llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
// Declare an internal global `extern_with_linkage_foo` which
......@@ -90,7 +89,7 @@ fn check_and_apply_linkage(
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{
if let Some(span) = span {
cx.sess().span_fatal(
span,
......@@ -107,7 +106,7 @@ fn check_and_apply_linkage(
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
declare::declare_global(cx, &sym, llty)
cx.declare_global(&sym, llty)
}
}
......@@ -139,14 +138,14 @@ fn static_addr_of_mut(
let gv = match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
let gv = declare::define_global(&self, &name[..],
let gv = self.define_global(&name[..],
self.val_ty(cv)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", name);
});
llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
gv
},
_ => declare::define_private_global(&self, self.val_ty(cv)),
_ => self.define_private_global(self.val_ty(cv)),
};
llvm::LLVMSetInitializer(gv, cv);
set_global_alignment(&self, gv, align);
......@@ -206,11 +205,11 @@ fn get_static(&self, def_id: DefId) -> &'ll Value {
Node::Item(&hir::Item {
ref attrs, span, node: hir::ItemKind::Static(..), ..
}) => {
if declare::get_declared_value(&self, &sym[..]).is_some() {
if self.get_declared_value(&sym[..]).is_some() {
span_bug!(span, "Conflicting symbol names for static?");
}
let g = declare::define_global(&self, &sym[..], llty).unwrap();
let g = self.define_global(&sym[..], llty).unwrap();
if !self.tcx.is_reachable_non_generic(def_id) {
unsafe {
......
......@@ -15,7 +15,6 @@
use debuginfo;
use callee;
use base;
use declare;
use monomorphize::Instance;
use value::Value;
......@@ -23,6 +22,7 @@
use type_::Type;
use type_of::PointeeInfo;
use interfaces::*;
use libc::c_uint;
use rustc_data_structures::base_n;
use rustc_data_structures::small_c_str::SmallCStr;
......@@ -315,21 +315,108 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
}
}
impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
pub fn sess<'a>(&'a self) -> &'a Session {
&self.tcx.sess
}
}
impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>,
ty::PolyExistentialTraitRef<'tcx>), &'ll Value>>
{
&self.vtables
}
fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, &'ll Value>> {
&self.instances
}
fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
callee::get_fn(&&self,instance)
}
fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value {
llvm::get_param(llfn, index)
}
fn eh_personality(&self) -> &'ll Value {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to codegen that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality
}
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
}
_ => {
let name = if base::wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = self.type_variadic_func(&[], self.type_i32());
self.declare_cfn(name, fty)
}
};
attributes::apply_target_cpu_attr(self, llfn);
self.eh_personality.set(Some(llfn));
llfn
}
// Returns a Value of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
fn eh_unwind_resume(&self) -> &'ll Value {
use attributes;
let unwresume = &self.eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
return llfn;
}
let tcx = self.tcx;
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
unwresume.set(Some(llfn));
return llfn;
}
let sig = ty::Binder::bind(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false,
hir::Unsafety::Unsafe,
Abi::C
));
let llfn = self.declare_fn("rust_eh_unwind_resume", sig);
attributes::unwind(llfn, true);
attributes::apply_target_cpu_attr(self, llfn);
unwresume.set(Some(llfn));
llfn
}
fn sess(&self) -> &Session {
&self.tcx.sess
}
fn check_overflow(&self) -> bool {
self.check_overflow
}
}
impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> {
......@@ -349,7 +436,7 @@ fn declare_intrinsic(
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(&self, $name, self.type_func(&[], $ret));
let f = self.declare_cfn($name, self.type_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
......@@ -357,7 +444,7 @@ fn declare_intrinsic(
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(&self, $name, self.type_variadic_func(&[], $ret));
let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
......@@ -365,7 +452,7 @@ fn declare_intrinsic(
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(&self, $name, self.type_func(&[$($arg),*], $ret));
let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
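// Illustrative invocations of the three macro arms (the intrinsic names and
// the local type bindings `void`/`t_i32` are assumptions about surrounding
// code elided by this diff):
//
//     ifn!("llvm.trap", fn() -> void);
//     ifn!("llvm.localescape", fn(...) -> void);
//     ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);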
......@@ -668,83 +755,6 @@ pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}
pub fn eh_personality(&self) -> &'b Value {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to codegen that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality
}
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
}
_ => {
let name = if base::wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = self.type_variadic_func(&[], self.type_i32());
declare::declare_cfn(self, name, fty)
}
};
attributes::apply_target_cpu_attr(self, llfn);
self.eh_personality.set(Some(llfn));
llfn
}
// Returns a Value of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
pub fn eh_unwind_resume(&self) -> &'b Value {
use attributes;
let unwresume = &self.eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
return llfn;
}
let tcx = self.tcx;
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
unwresume.set(Some(llfn));
return llfn;
}
let sig = ty::Binder::bind(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false,
hir::Unsafety::Unsafe,
Abi::C
));
let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", sig);
attributes::unwind(llfn, true);
attributes::apply_target_cpu_attr(self, llfn);
unwresume.set(Some(llfn));
llfn
}
}
impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> {
......
......@@ -13,7 +13,7 @@
use super::utils::{DIB, span_start};
use llvm;
use llvm::debuginfo::DIScope;
use llvm::debuginfo::{DIScope, DISubprogram};
use common::CodegenCx;
use rustc::mir::{Mir, SourceScope};
......@@ -27,15 +27,15 @@
use syntax_pos::BytePos;
#[derive(Clone, Copy, Debug)]
pub struct MirDebugScope<'ll> {
pub scope_metadata: Option<&'ll DIScope>,
pub struct MirDebugScope<D> {
pub scope_metadata: Option<D>,
// Start and end offsets of the file to which this DIScope belongs.
// These are used to quickly determine whether some span refers to the same file.
pub file_start_pos: BytePos,
pub file_end_pos: BytePos,
}
impl MirDebugScope<'ll> {
impl<D> MirDebugScope<D> {
pub fn is_valid(&self) -> bool {
self.scope_metadata.is_some()
}
......@@ -46,8 +46,8 @@ pub fn is_valid(&self) -> bool {
pub fn create_mir_scopes(
cx: &CodegenCx<'ll, '_>,
mir: &Mir,
debug_context: &FunctionDebugContext<'ll>,
) -> IndexVec<SourceScope, MirDebugScope<'ll>> {
debug_context: &FunctionDebugContext<&'ll DISubprogram>,
) -> IndexVec<SourceScope, MirDebugScope<&'ll DIScope>> {
let null_scope = MirDebugScope {
scope_metadata: None,
file_start_pos: BytePos(0),
......@@ -82,9 +82,9 @@ pub fn create_mir_scopes(
fn make_mir_scope(cx: &CodegenCx<'ll, '_>,
mir: &Mir,
has_variables: &BitSet<SourceScope>,
debug_context: &FunctionDebugContextData<'ll>,
debug_context: &FunctionDebugContextData<&'ll DISubprogram>,
scope: SourceScope,
scopes: &mut IndexVec<SourceScope, MirDebugScope<'ll>>) {
scopes: &mut IndexVec<SourceScope, MirDebugScope<&'ll DIScope>>) {
if scopes[scope].is_valid() {
return;
}
......
......@@ -14,10 +14,9 @@
use common::CodegenCx;
use builder::Builder;
use declare;
use rustc::session::config::DebugInfo;
use value::Value;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods};
use interfaces::*;
use syntax::attr;
......@@ -58,7 +57,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
let llvm_type = cx.type_array(cx.type_i8(),
section_contents.len() as u64);
let section_var = declare::define_global(cx, section_var_name,
let section_var = cx.define_global(section_var_name,
llvm_type).unwrap_or_else(||{
bug!("symbol `{}` is already defined", section_var_name)
});
......
......@@ -1968,6 +1968,68 @@ pub fn create_global_var_metadata(
}
}
/// Creates debug information for the given vtable, which is for the
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_vtable_metadata(
cx: &CodegenCx<'ll, 'tcx>,
ty: ty::Ty<'tcx>,
vtable: &'ll Value,
) {
if cx.dbg_cx.is_none() {
return;
}
let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP);
unsafe {
// LLVMRustDIBuilderCreateStructType() wants an empty array. A null
// pointer will lead to hard to trace and debug LLVM assertions
// later on in llvm/lib/IR/Value.cpp.
let empty_array = create_DIArray(DIB(cx), &[]);
let name = const_cstr!("vtable");
// Create a new one each time. We don't want metadata caching
// here, because each vtable will refer to a unique containing
// type.
let vtable_type = llvm::LLVMRustDIBuilderCreateStructType(
DIB(cx),
NO_SCOPE_METADATA,
name.as_ptr(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
Size::ZERO.bits(),
cx.tcx.data_layout.pointer_align.abi_bits() as u32,
DIFlags::FlagArtificial,
None,
empty_array,
0,
Some(type_metadata),
name.as_ptr()
);
llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx),
NO_SCOPE_METADATA,
name.as_ptr(),
// LLVM 3.9 doesn't accept null here, so pass the name as the linkage name.
name.as_ptr(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
vtable_type,
true,
vtable,
None,
0);
}
}
// Creates an "extension" of an existing DIScope into another file.
pub fn extend_scope_to_file(
cx: &CodegenCx<'ll, '_>,
......@@ -1983,61 +2045,3 @@ pub fn extend_scope_to_file(
file_metadata)
}
}
impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
/// Creates debug information for the given vtable, which is for the
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
fn create_vtable_metadata(
&self,
ty: ty::Ty<'tcx>,
vtable: &'ll Value,
) {
if self.dbg_cx.is_none() {
return;
}
let type_metadata = type_metadata(&self, ty, syntax_pos::DUMMY_SP);
unsafe {
// LLVMRustDIBuilderCreateStructType() wants an empty array. A null
// pointer will lead to hard to trace and debug LLVM assertions
// later on in llvm/lib/IR/Value.cpp.
let empty_array = create_DIArray(DIB(&self), &[]);
let name = const_cstr!("vtable");
// Create a new one each time. We don't want metadata caching
// here, because each vtable will refer to a unique containing
// type.
let vtable_type = llvm::LLVMRustDIBuilderCreateStructType(
DIB(&self),
NO_SCOPE_METADATA,
name.as_ptr(),
unknown_file_metadata(&self),
UNKNOWN_LINE_NUMBER,
Size::ZERO.bits(),
self.tcx.data_layout.pointer_align.abi_bits() as u32,
DIFlags::FlagArtificial,
None,
empty_array,
0,
Some(type_metadata),
name.as_ptr()
);
llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(&self),
NO_SCOPE_METADATA,
name.as_ptr(),
ptr::null(),
unknown_file_metadata(&self),
UNKNOWN_LINE_NUMBER,
vtable_type,
true,
vtable,
None,
0);
}
}
}
......@@ -17,7 +17,7 @@
use llvm;
use llvm::debuginfo::DIScope;
use builder::Builder;
use interfaces::BuilderMethods;
use interfaces::*;
use libc::c_uint;
use syntax_pos::{Span, Pos};
......@@ -25,8 +25,8 @@
/// Sets the current debug location at the beginning of the span.
///
/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...).
pub fn set_source_location(
debug_context: &FunctionDebugContext<'ll>,
pub fn set_source_location<D>(
debug_context: &FunctionDebugContext<D>,
bx: &Builder<'_, 'll, '_>,
scope: Option<&'ll DIScope>,
span: Span,
......@@ -41,7 +41,7 @@ pub fn set_source_location(
};
let dbg_loc = if function_debug_context.source_locations_enabled.get() {
debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span));
debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span));
let loc = span_start(bx.cx(), span);
InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize())
} else {
......@@ -56,7 +56,7 @@ pub fn set_source_location(
/// they are disabled when beginning to codegen a new function. This functions
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is codegened.
pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext<'ll>) {
pub fn start_emitting_source_locations<D>(dbg_context: &FunctionDebugContext<D>) {
if let FunctionDebugContext::RegularContext(ref data) = *dbg_context {
data.source_locations_enabled.set(true);
}
......
......@@ -14,6 +14,7 @@
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};
use interfaces::*;
use rustc::hir;
......
......@@ -19,6 +19,7 @@
use llvm;
use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray};
use common::{CodegenCx};
use interfaces::*;
use syntax_pos::{self, Span};
......
......@@ -23,7 +23,7 @@
use llvm;
use llvm::AttributePlace::Function;
use rustc::ty::{self, PolyFnSig};
use rustc::ty::layout::LayoutOf;
use rustc::ty::layout::{self, LayoutOf};
use rustc::session::config::Sanitizer;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_target::spec::PanicStrategy;
......@@ -31,22 +31,9 @@
use attributes;
use context::CodegenCx;
use type_::Type;
use interfaces::*;
use value::Value;
/// Declare a global value.
///
/// If there’s a value with the same name already declared, the function will
/// return its Value instead.
pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value {
debug!("declare_global(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe {
llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty)
}
}
/// Declare a function.
///
/// If there’s a value with the same name already declared, the function will
......@@ -108,127 +95,148 @@ fn declare_raw_fn(
llfn
}
impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
/// Declare a global value.
///
/// If there’s a value with the same name already declared, the function will
/// return its Value instead.
fn declare_global(
&self,
name: &str, ty: &'ll Type
) -> &'ll Value {
debug!("declare_global(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe {
llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty)
}
}
/// Declare a C ABI function.
///
/// Only use this for foreign function ABIs and glue. For Rust functions use
/// `declare_fn` instead.
///
/// If there’s a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
pub fn declare_cfn(
cx: &CodegenCx<'ll, '_>,
name: &str,
fn_type: &'ll Type
) -> &'ll Value {
declare_raw_fn(cx, name, llvm::CCallConv, fn_type)
}
/// Declare a C ABI function.
///
/// Only use this for foreign function ABIs and glue. For Rust functions use
/// `declare_fn` instead.
///
/// If there’s a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
fn declare_cfn(
&self,
name: &str,
fn_type: &'ll Type
) -> &'ll Value {
declare_raw_fn(self, name, llvm::CCallConv, fn_type)
}
/// Declare a Rust function.
///
/// If there’s a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
pub fn declare_fn(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
sig: PolyFnSig<'tcx>,
) -> &'ll Value {
debug!("declare_rust_fn(name={:?}, sig={:?})", name, sig);
let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
/// Declare a Rust function.
///
/// If there’s a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
fn declare_fn(
&self,
name: &str,
sig: PolyFnSig<'tcx>,
) -> &'ll Value {
debug!("declare_rust_fn(name={:?}, sig={:?})", name, sig);
let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let fty = FnType::new(self, sig, &[]);
let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self));
if self.layout_of(sig.output()).abi == layout::Abi::Uninhabited {
llvm::Attribute::NoReturn.apply_llfn(Function, llfn);
}
let fty = FnType::new(cx, sig, &[]);
let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx));
if sig.abi != Abi::Rust && sig.abi != Abi::RustCall {
attributes::unwind(llfn, false);
}
if cx.layout_of(sig.output()).abi.is_uninhabited() {
llvm::Attribute::NoReturn.apply_llfn(Function, llfn);
}
fty.apply_attrs_llfn(llfn);
if sig.abi != Abi::Rust && sig.abi != Abi::RustCall {
attributes::unwind(llfn, false);
llfn
}
fty.apply_attrs_llfn(llfn);
llfn
}
/// Declare a global with an intention to define it.
///
/// Use this function when you intend to define a global. This function will
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to a user error (e.g. misuse of the #[no_mangle] or #[export_name] attributes).
pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
if get_defined_value(cx, name).is_some() {
None
} else {
Some(declare_global(cx, name, ty))
/// Declare a global with an intention to define it.
///
/// Use this function when you intend to define a global. This function will
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to a user error (e.g. misuse of the #[no_mangle] or #[export_name] attributes).
fn define_global(
&self,
name: &str,
ty: &'ll Type
) -> Option<&'ll Value> {
if self.get_defined_value(name).is_some() {
None
} else {
Some(self.declare_global(name, ty))
}
}
}
/// Declare a private global
///
/// Use this function when you intend to define a global without a name.
pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty)
/// Declare a private global
///
/// Use this function when you intend to define a global without a name.
fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty)
}
}
}
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
pub fn define_fn(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
if get_defined_value(cx, name).is_some() {
cx.sess().fatal(&format!("symbol `{}` already defined", name))
} else {
declare_fn(cx, name, fn_sig)
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
fn define_fn(
&self,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
if self.get_defined_value(name).is_some() {
self.sess().fatal(&format!("symbol `{}` already defined", name))
} else {
self.declare_fn(name, fn_sig)
}
}
}
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
pub fn define_internal_fn(
cx: &CodegenCx<'ll, 'tcx>,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
let llfn = define_fn(cx, name, fn_sig);
unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
llfn
}
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
fn define_internal_fn(
&self,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
let llfn = self.define_fn(name, fn_sig);
unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
llfn
}
/// Get declared value by name.
pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) }
}
/// Get declared value by name.
fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe { llvm::LLVMRustGetNamedValue(self.llmod, namebuf.as_ptr()) }
}
/// Get defined or externally defined (AvailableExternally linkage) value by
/// name.
pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
get_declared_value(cx, name).and_then(|val|{
let declaration = unsafe {
llvm::LLVMIsDeclaration(val) != 0
};
if !declaration {
Some(val)
} else {
None
}
})
/// Get defined or externally defined (AvailableExternally linkage) value by
/// name.
fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
self.get_declared_value(name).and_then(|val|{
let declaration = unsafe {
llvm::LLVMIsDeclaration(val) != 0
};
if !declaration {
Some(val)
} else {
None
}
})
}
}
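// Sketch of the declare/define contract (illustrative; assumes a CodegenCx
// `cx` and uses LLVMSetInitializer as the other call sites in this commit do):
//
//     let g = cx.define_global("foo", cx.type_i32()).unwrap(); // fresh declaration
//     unsafe { llvm::LLVMSetInitializer(g, cx.const_i32(0)); } // now a definition
//     assert!(cx.define_global("foo", cx.type_i32()).is_none()); // name taken
//     let _same = cx.declare_global("foo", cx.type_i32());      // returns existing
//
// declare_* is idempotent and returns any existing declaration, while define_*
// refuses (returns None, or aborts for define_fn) once a definition exists.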
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::HasCodegen;
use abi::FnType;
use rustc::ty::{FnSig, Instance, Ty};
pub trait AbiMethods<'tcx> {
fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
fn new_vtable(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>;
}
pub trait AbiBuilderMethods<'tcx>: HasCodegen<'tcx> {
fn apply_attrs_callsite(&self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
}
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::backend::Backend;
use super::HasCodegen;
use mir::place::PlaceRef;
use rustc::hir::{GlobalAsm, InlineAsm};
pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> {
fn codegen_inline_asm(
&self,
ia: &InlineAsm,
outputs: Vec<PlaceRef<'tcx, Self::Value>>,
inputs: Vec<Self::Value>,
) -> bool;
}
pub trait AsmMethods<'tcx>: Backend<'tcx> {
fn codegen_global_asm(&self, ga: &GlobalAsm);
}
......@@ -15,9 +15,11 @@
pub trait BackendTypes {
type Value: CodegenObject;
type BasicBlock;
type BasicBlock: Copy;
type Type: CodegenObject;
type Context;
type DIScope: Copy;
}
pub trait Backend<'tcx>:
......
......@@ -8,25 +8,35 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
use super::type_::ArgTypeMethods;
use super::HasCodegen;
use builder::MemFlags;
use common::*;
use libc::c_char;
use mir::operand::OperandRef;
use mir::place::PlaceRef;
use rustc::session::Session;
use rustc::ty::layout::{Align, Size};
use std::borrow::Cow;
use std::ops::Range;
use syntax::ast::AsmDialect;
pub trait BuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> {
pub trait BuilderMethods<'a, 'tcx: 'a>:
HasCodegen<'tcx>
+ DebugInfoBuilderMethods<'tcx>
+ ArgTypeMethods<'tcx>
+ AbiBuilderMethods<'tcx>
+ IntrinsicCallMethods<'tcx>
+ AsmBuilderMethods<'tcx>
{
fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self;
fn with_cx(cx: &'a Self::CodegenCx) -> Self;
fn build_sibling_block<'b>(&self, name: &'b str) -> Self;
fn sess(&self) -> &Session;
fn cx(&self) -> &'a Self::CodegenCx; // FIXME(eddyb) remove 'a
fn cx(&self) -> &Self::CodegenCx;
fn llfn(&self) -> Self::Value;
fn llbb(&self) -> Self::BasicBlock;
fn count_insn(&self, category: &str);
......@@ -45,7 +55,7 @@ fn invoke(
args: &[Self::Value],
then: Self::BasicBlock,
catch: Self::BasicBlock,
bundle: Option<&OperandBundleDef<Self::Value>>,
funclet: Option<&Funclet<Self::Value>>,
) -> Self::Value;
fn unreachable(&self);
fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
......@@ -252,7 +262,10 @@ fn call(
&self,
llfn: Self::Value,
args: &[Self::Value],
bundle: Option<&OperandBundleDef<Self::Value>>,
funclet: Option<&Funclet<Self::Value>>,
) -> Self::Value;
fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn delete_basic_block(&self, bb: Self::BasicBlock);
fn do_not_inline(&self, llret: Self::Value);
}
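// With these supertraits, a single `Bx: BuilderMethods<'a, 'tcx>` bound hands
// generic MIR codegen the whole lowering surface (debuginfo, argument/ABI
// handling, intrinsics, inline asm) through one type parameter, which is what
// lets base.rs above call mir::codegen_mir::<Builder>(cx, lldecl, &mir,
// instance, sig) with only the concrete Builder named at the call site.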
......@@ -9,6 +9,10 @@
// except according to those terms.
use super::Backend;
use mir::place::PlaceRef;
use rustc::mir::interpret::Allocation;
use rustc::mir::interpret::Scalar;
use rustc::ty::layout;
use syntax::symbol::LocalInternedString;
pub trait ConstMethods<'tcx>: Backend<'tcx> {
......@@ -39,4 +43,17 @@ pub trait ConstMethods<'tcx>: Backend<'tcx> {
fn is_const_integral(&self, v: Self::Value) -> bool;
fn is_const_real(&self, v: Self::Value) -> bool;
fn scalar_to_backend(
&self,
cv: Scalar,
layout: &layout::Scalar,
llty: Self::Type,
) -> Self::Value;
fn from_const_alloc(
&self,
layout: layout::TyLayout<'tcx>,
alloc: &Allocation,
offset: layout::Size,
) -> PlaceRef<'tcx, Self::Value>;
}
......@@ -9,8 +9,53 @@
// except according to those terms.
use super::backend::Backend;
use rustc::ty::Ty;
use super::HasCodegen;
use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind};
use monomorphize::Instance;
use rustc::hir::def_id::CrateNum;
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc_data_structures::indexed_vec::IndexVec;
use syntax::ast::Name;
use syntax_pos::{SourceFile, Span};
pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value);
fn create_function_debug_context(
&self,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
llfn: Self::Value,
mir: &mir::Mir,
) -> FunctionDebugContext<Self::DIScope>;
fn create_mir_scopes(
&self,
mir: &mir::Mir,
debug_context: &FunctionDebugContext<Self::DIScope>,
) -> IndexVec<mir::SourceScope, MirDebugScope<Self::DIScope>>;
fn extend_scope_to_file(
&self,
scope_metadata: Self::DIScope,
file: &SourceFile,
defining_crate: CrateNum,
) -> Self::DIScope;
}
pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
fn declare_local(
&self,
dbg_context: &FunctionDebugContext<Self::DIScope>,
variable_name: Name,
variable_type: Ty<'tcx>,
scope_metadata: Self::DIScope,
variable_access: VariableAccess<'_, Self::Value>,
variable_kind: VariableKind,
span: Span,
);
fn set_source_location(
&self,
debug_context: &FunctionDebugContext<Self::DIScope>,
scope: Option<Self::DIScope>,
span: Span,
);
}
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::backend::Backend;
use rustc::ty;
pub trait DeclareMethods<'tcx>: Backend<'tcx> {
fn declare_global(&self, name: &str, ty: Self::Type) -> Self::Value;
fn declare_cfn(&self, name: &str, fn_type: Self::Type) -> Self::Value;
fn declare_fn(&self, name: &str, sig: ty::PolyFnSig<'tcx>) -> Self::Value;
fn define_global(&self, name: &str, ty: Self::Type) -> Option<Self::Value>;
fn define_private_global(&self, ty: Self::Type) -> Self::Value;
fn define_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value;
fn define_internal_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value;
fn get_declared_value(&self, name: &str) -> Option<Self::Value>;
fn get_defined_value(&self, name: &str) -> Option<Self::Value>;
}
......@@ -9,13 +9,13 @@
// except according to those terms.
use super::backend::Backend;
use super::builder::BuilderMethods;
use super::HasCodegen;
use abi::FnType;
use mir::operand::OperandRef;
use rustc::ty::Ty;
use syntax_pos::Span;
pub trait IntrinsicCallMethods<'a, 'tcx: 'a>: BuilderMethods<'a, 'tcx> {
pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {
fn codegen_intrinsic_call(
&self,
callee_ty: Ty<'tcx>,
......
......@@ -9,6 +9,8 @@
// except according to those terms.
use super::backend::Backend;
use libc::c_uint;
use rustc::session::Session;
use rustc::ty::{self, Instance, Ty};
use rustc::util::nodemap::FxHashMap;
use std::cell::RefCell;
......@@ -17,5 +19,11 @@ pub trait MiscMethods<'tcx>: Backend<'tcx> {
fn vtables(
&self,
) -> &RefCell<FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), Self::Value>>;
fn check_overflow(&self) -> bool;
fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, Self::Value>>;
fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value;
fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value;
fn eh_personality(&self) -> Self::Value;
fn eh_unwind_resume(&self) -> Self::Value;
fn sess(&self) -> &Session;
}
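A small sketch of what moving `get_fn` and `sess` behind the trait buys (not in the diff; `fn_addr_for` is invented): generic code no longer reaches into `CodegenCx` fields directly.

// Sketch only: resolve a monomorphized instance through the trait, instead
// of reaching into backend-specific fields.
fn fn_addr_for<'tcx, Cx: MiscMethods<'tcx>>(
    cx: &Cx,
    instance: Instance<'tcx>,
) -> Cx::Value {
    cx.get_fn(instance)
}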
......@@ -8,23 +8,31 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod abi;
mod asm;
mod backend;
mod builder;
mod consts;
mod debuginfo;
mod declare;
mod intrinsic;
mod misc;
mod statics;
mod type_;
pub use self::abi::{AbiBuilderMethods, AbiMethods};
pub use self::asm::{AsmBuilderMethods, AsmMethods};
pub use self::backend::{Backend, BackendTypes};
pub use self::builder::BuilderMethods;
pub use self::consts::ConstMethods;
pub use self::debuginfo::DebugInfoMethods;
pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
pub use self::declare::DeclareMethods;
pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods};
pub use self::misc::MiscMethods;
pub use self::statics::StaticMethods;
pub use self::type_::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods};
pub use self::type_::{
ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
};
use std::fmt;
......@@ -35,6 +43,10 @@ pub trait CodegenMethods<'tcx>:
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
{
}
......@@ -45,6 +57,10 @@ impl<'tcx, T> CodegenMethods<'tcx> for T where
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
{}
pub trait HasCodegen<'tcx>: Backend<'tcx> {
......@@ -54,6 +70,7 @@ pub trait HasCodegen<'tcx>: Backend<'tcx> {
BasicBlock = Self::BasicBlock,
Type = Self::Type,
Context = Self::Context,
DIScope = Self::DIScope,
>;
}
......
......@@ -9,11 +9,14 @@
// except according to those terms.
use super::backend::Backend;
use super::HasCodegen;
use common::TypeKind;
use mir::place::PlaceRef;
use rustc::ty::layout::TyLayout;
use rustc::ty::layout::{self, Align, Size};
use rustc::ty::Ty;
use rustc::util::nodemap::FxHashMap;
use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
use std::cell::RefCell;
use syntax::ast;
......@@ -70,6 +73,10 @@ pub trait DerivedTypeMethods<'tcx>: Backend<'tcx> {
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
fn scalar_pair_element_backend_type<'a>(
......@@ -80,6 +87,22 @@ fn scalar_pair_element_backend_type<'a>(
) -> Self::Type;
}
pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> {
fn store_fn_arg(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
idx: &mut usize,
dst: PlaceRef<'tcx, Self::Value>,
);
fn store_arg_ty(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
val: Self::Value,
dst: PlaceRef<'tcx, Self::Value>,
);
fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> Self::Type;
}
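Illustrative sketch (not in the commit; `spill_arg` is a made-up helper, and it assumes `PlaceRef` is `Copy` as in the LLVM backend): this is the pattern `arg_local_refs` uses further down — allocate a slot, then let the builder store the incoming ABI argument into it.

// Sketch only: spill one incoming argument to a fresh stack slot.
fn spill_arg<'a, 'tcx, Bx>(
    bx: &Bx,
    arg: &ArgType<'tcx, Ty<'tcx>>,
    llarg_idx: &mut usize,
) -> PlaceRef<'tcx, Bx::Value>
where
    Bx: ArgTypeMethods<'tcx> + BuilderMethods<'a, 'tcx>,
{
    let tmp = PlaceRef::alloca(bx, arg.layout, "arg");
    bx.store_fn_arg(arg, llarg_idx, tmp);
    tmp
}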
pub trait TypeMethods<'tcx>:
BaseTypeMethods<'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx>
{
......
......@@ -20,7 +20,6 @@
use base::*;
use common::*;
use context::CodegenCx;
use declare;
use glue;
use type_::Type;
use type_of::LayoutLlvmExt;
......@@ -87,7 +86,7 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu
Some(cx.get_intrinsic(&llvm_name))
}
impl IntrinsicCallMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_codegen_llvm/context.rs
......@@ -274,12 +273,12 @@ fn codegen_intrinsic_call(
},
"volatile_store" => {
let dst = args[0].deref(cx);
args[1].val.volatile_store(&self, dst);
args[1].val.volatile_store(self, dst);
return;
},
"unaligned_volatile_store" => {
let dst = args[0].deref(cx);
args[1].val.unaligned_volatile_store(&self, dst);
args[1].val.unaligned_volatile_store(self, dst);
return;
},
"prefetch_read_data" | "prefetch_write_data" |
......@@ -451,7 +450,7 @@ fn codegen_intrinsic_call(
},
"discriminant_value" => {
args[0].deref(cx).codegen_get_discr(&self, ret_ty)
args[0].deref(cx).codegen_get_discr(self, ret_ty)
}
name if name.starts_with("simd_") => {
......@@ -600,7 +599,7 @@ fn codegen_intrinsic_call(
"nontemporal_store" => {
let dst = args[0].deref(cx);
args[1].val.nontemporal_store(&self, dst);
args[1].val.nontemporal_store(self, dst);
return;
}
......@@ -716,9 +715,10 @@ fn modify_as_needed<'ll, 'tcx>(
let val = match intr.definition {
intrinsics::IntrinsicDef::Named(name) => {
let f = declare::declare_cfn(cx,
name,
cx.type_func(&inputs, outputs));
let f = cx.declare_cfn(
name,
cx.type_func(&inputs, outputs),
);
self.call(f, &llargs, None)
}
};
......@@ -745,7 +745,7 @@ fn modify_as_needed<'ll, 'tcx>(
let ptr = self.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx)));
self.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(&self, llval, result.layout)
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
.val.store(self, result);
}
}
......@@ -801,11 +801,11 @@ fn try_intrinsic(
local_ptr: &'ll Value,
dest: &'ll Value,
) {
if bx.sess().no_landing_pads() {
if bx.cx().sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
} else if wants_msvc_seh(bx.cx().sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
......@@ -1003,7 +1003,7 @@ fn gen_fn<'ll, 'tcx>(
hir::Unsafety::Unsafe,
Abi::Rust
));
let llfn = declare::define_internal_fn(cx, name, rust_fn_sig);
let llfn = cx.define_internal_fn(name, rust_fn_sig);
attributes::from_fn_attrs(cx, llfn, None);
let bx = Builder::new_block(cx, llfn, "entry-block");
codegen(bx);
......@@ -1058,7 +1058,7 @@ fn generic_simd_intrinsic(
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
bx.cx().sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
......@@ -1229,7 +1229,7 @@ fn simd_simple_float_intrinsic(
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
bx.cx().sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
......@@ -1447,7 +1447,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
let f = bx.cx().declare_cfn(&llvm_intrinsic,
bx.cx().type_func(&[
llvm_pointer_vec_ty,
alignment_ty,
......@@ -1549,7 +1549,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
let f = bx.cx().declare_cfn(&llvm_intrinsic,
bx.cx().type_func(&[llvm_elem_vec_ty,
llvm_pointer_vec_ty,
alignment_ty,
......
......@@ -8,16 +8,13 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{FnType, FnTypeExt};
use abi::FnType;
use callee;
use builder::Builder;
use monomorphize;
use value::Value;
use interfaces::*;
use rustc::ty::{self, Ty};
use rustc::ty::layout::HasTyCtxt;
#[derive(Copy, Clone, Debug)]
pub struct VirtualIndex(u64);
......@@ -31,15 +28,18 @@ pub fn from_index(index: usize) -> Self {
VirtualIndex(index as u64 + 3)
}
pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>,
llvtable: &'ll Value,
fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value {
pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
self,
bx: &Bx,
llvtable: Bx::Value,
fn_ty: &FnType<'tcx, Ty<'tcx>>
) -> Bx::Value {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", llvtable, self);
let llvtable = bx.pointercast(
llvtable,
bx.cx().type_ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(
......
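A sketch of the call site this generalization enables (not from the diff; `load_vtable_entry` is hypothetical): any backend's builder can now pull a method pointer out of a vtable through `fn_ptr_backend_type`.

// Sketch only: fetch the n-th method pointer from a vtable with any builder.
fn load_vtable_entry<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &Bx,
    llvtable: Bx::Value,
    index: usize,
    fn_ty: &FnType<'tcx, Ty<'tcx>>,
) -> Bx::Value {
    VirtualIndex::from_index(index).get_fn(bx, llvtable, fn_ty)
}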
......@@ -18,13 +18,14 @@
use rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext};
use rustc::mir::traversal;
use rustc::ty;
use rustc::ty::layout::LayoutOf;
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use type_of::LayoutLlvmExt;
use super::FunctionCx;
use value::Value;
use interfaces::*;
pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet<mir::Local> {
pub fn non_ssa_locals<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
fx: &FunctionCx<'a, 'tcx, Bx>
) -> BitSet<mir::Local> {
let mir = fx.mir;
let mut analyzer = LocalAnalyzer::new(fx);
......@@ -53,8 +54,8 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet<mir:
analyzer.non_ssa_locals
}
struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, V: 'll> {
fx: &'mir FunctionCx<'a, 'll, 'tcx, V>,
struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> {
fx: &'mir FunctionCx<'a, 'tcx, Bx>,
dominators: Dominators<mir::BasicBlock>,
non_ssa_locals: BitSet<mir::Local>,
// The location of the first visited direct assignment to each
......@@ -62,8 +63,8 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, V: 'll> {
first_assignment: IndexVec<mir::Local, Location>
}
impl LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> Self {
impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self {
let invalid_location =
mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location();
let mut analyzer = LocalAnalyzer {
......@@ -104,7 +105,8 @@ fn assign(&mut self, local: mir::Local, location: Location) {
}
}
impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
for LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
place: &mir::Place<'tcx>,
......@@ -143,7 +145,7 @@ fn visit_terminator_kind(&mut self,
_ => None,
};
if let Some((def_id, args)) = check {
if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() {
if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() {
// box_free(x) shares with `drop x` the property that it
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
......@@ -175,20 +177,20 @@ fn visit_place(&mut self,
_ => false
};
if is_consume {
let base_ty = proj.base.ty(self.fx.mir, cx.tcx);
let base_ty = proj.base.ty(self.fx.mir, cx.tcx());
let base_ty = self.fx.monomorphize(&base_ty);
// ZSTs don't require any actual memory access.
let elem_ty = base_ty
.projection_ty(cx.tcx, &proj.elem)
.to_ty(cx.tcx);
.projection_ty(cx.tcx(), &proj.elem)
.to_ty(cx.tcx());
let elem_ty = self.fx.monomorphize(&elem_ty);
if cx.layout_of(elem_ty).is_zst() {
return;
}
if let mir::ProjectionElem::Field(..) = proj.elem {
let layout = cx.layout_of(base_ty.to_ty(cx.tcx));
let layout = cx.layout_of(base_ty.to_ty(cx.tcx()));
if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
// Recurse with the same context, instead of `Projection`,
// potentially stopping at non-operand projections,
......@@ -254,8 +256,8 @@ fn visit_local(&mut self,
}
PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx);
let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx));
let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx());
let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx()));
// Only need the place if we're actually dropping it.
if self.fx.cx.type_needs_drop(ty) {
......
......@@ -8,82 +8,21 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm;
use rustc::mir::interpret::{ErrorHandled, read_target_uint};
use rustc_mir::const_eval::const_field;
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType};
use rustc::mir::interpret::{GlobalId, Pointer, Allocation, ConstValue};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, HasTyCtxt};
use builder::Builder;
use common::{CodegenCx};
use type_of::LayoutLlvmExt;
use type_::Type;
use syntax::ast::Mutability;
use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size};
use common::CodegenCx;
use syntax::source_map::Span;
use value::Value;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, StaticMethods};
use interfaces::*;
use super::super::callee;
use super::FunctionCx;
pub fn scalar_to_llvm(
cx: &CodegenCx<'ll, '_>,
cv: Scalar,
layout: &layout::Scalar,
llty: &'ll Type,
) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() };
match cv {
Scalar::Bits { size: 0, .. } => {
assert_eq!(0, layout.value.size(cx).bytes());
cx.const_undef(cx.type_ix(0))
},
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(cx).bytes());
let llval = cx.const_uint_big(cx.type_ix(bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
cx.static_bitcast(llval, llty)
}
},
Scalar::Ptr(ptr) => {
let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id);
let base_addr = match alloc_type {
Some(AllocType::Memory(alloc)) => {
let init = const_alloc_to_llvm(cx, alloc);
if alloc.mutability == Mutability::Mutable {
cx.static_addr_of_mut(init, alloc.align, None)
} else {
cx.static_addr_of(init, alloc.align, None)
}
}
Some(AllocType::Function(fn_instance)) => {
callee::get_fn(cx, fn_instance)
}
Some(AllocType::Static(def_id)) => {
assert!(cx.tcx.is_static(def_id).is_some());
cx.get_static(def_id)
}
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
cx.static_bitcast(base_addr, cx.type_i8p()),
&cx.const_usize(ptr.offset.bytes()),
1,
) };
if layout.value != layout::Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
cx.static_bitcast(llval, llty)
}
}
}
}
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
let dl = cx.data_layout();
......@@ -101,8 +40,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
dl.endian,
&alloc.bytes[offset..(offset + pointer_size)],
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(scalar_to_llvm(
cx,
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&layout::Scalar {
value: layout::Primitive::Pointer,
......@@ -138,10 +76,10 @@ pub fn codegen_static_initializer(
Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn fully_evaluate(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
constant: &'tcx ty::Const<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> {
match constant.val {
......@@ -161,7 +99,7 @@ fn fully_evaluate(
pub fn eval_mir_constant(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
constant: &mir::Constant<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> {
let c = self.monomorphize(&constant.literal);
......@@ -171,11 +109,11 @@ pub fn eval_mir_constant(
/// process constant containing SIMD shuffle indices
pub fn simd_shuffle_indices(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
span: Span,
ty: Ty<'tcx>,
constant: Result<&'tcx ty::Const<'tcx>, ErrorHandled>,
) -> (&'ll Value, Ty<'tcx>) {
) -> (Bx::Value, Ty<'tcx>) {
constant
.and_then(|c| {
let field_ty = c.ty.builtin_index().unwrap();
......@@ -198,9 +136,9 @@ pub fn simd_shuffle_indices(
layout::Abi::Scalar(ref x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
};
Ok(scalar_to_llvm(
bx.cx(), prim, scalar,
layout.immediate_llvm_type(bx.cx()),
Ok(bx.cx().scalar_to_backend(
prim, scalar,
bx.cx().immediate_backend_type(layout),
))
} else {
bug!("simd shuffle field {:?}", field)
......@@ -216,7 +154,7 @@ pub fn simd_shuffle_indices(
);
// We've errored, so we don't have to produce working code.
let ty = self.monomorphize(&ty);
let llty = bx.cx().layout_of(ty).llvm_type(bx.cx());
let llty = bx.cx().backend_type(bx.cx().layout_of(ty));
(bx.cx().const_undef(llty), ty)
})
}
......
......@@ -9,8 +9,7 @@
// except according to those terms.
use libc::c_uint;
use llvm::{self, BasicBlock};
use llvm::debuginfo::DIScope;
use llvm;
use llvm_util;
use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt};
......@@ -18,13 +17,11 @@
use rustc::ty::subst::Substs;
use rustc::session::config::DebugInfo;
use base;
use builder::Builder;
use common::{CodegenCx, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext};
use common::Funclet;
use monomorphize::Instance;
use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode};
use value::Value;
use interfaces::{BuilderMethods, ConstMethods, DerivedTypeMethods};
use abi::{FnType, PassMode};
use interfaces::*;
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords;
......@@ -43,16 +40,16 @@
use self::operand::{OperandRef, OperandValue};
/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> {
pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> {
instance: Instance<'tcx>,
mir: &'a mir::Mir<'tcx>,
debug_context: FunctionDebugContext<'ll>,
debug_context: FunctionDebugContext<Bx::DIScope>,
llfn: V,
llfn: Bx::Value,
cx: &'a CodegenCx<'ll, 'tcx>,
cx: &'a Bx::CodegenCx,
fn_ty: FnType<'tcx, Ty<'tcx>>,
......@@ -63,25 +60,24 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> {
/// don't really care about it very much. Anyway, this value
/// contains an alloca into which the personality is stored and
/// then later loaded when generating the DIVERGE_BLOCK.
personality_slot: Option<PlaceRef<'tcx, V>>,
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
/// A `Block` for each MIR `BasicBlock`
blocks: IndexVec<mir::BasicBlock, &'ll BasicBlock>,
blocks: IndexVec<mir::BasicBlock, Bx::BasicBlock>,
/// The funclet status of each basic block
cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
/// When targeting MSVC, this stores the cleanup info for each funclet
    /// BB. This is initialized as we compute the funclets'
/// head block in RPO.
funclets: &'a IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>,
/// BB. This is initialized as we compute the funclets' head block in RPO.
funclets: IndexVec<mir::BasicBlock, Option<Funclet<'static, Bx::Value>>>,
/// This stores the landing-pad block for a given BB, computed lazily on GNU
/// and eagerly on MSVC.
landing_pads: IndexVec<mir::BasicBlock, Option<&'ll BasicBlock>>,
landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
/// Cached unreachable block
unreachable_block: Option<&'ll BasicBlock>,
unreachable_block: Option<Bx::BasicBlock>,
/// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
......@@ -98,20 +94,20 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> {
///
/// Avoiding allocs can also be important for certain intrinsics,
/// notably `expect`.
locals: IndexVec<mir::Local, LocalRef<'tcx, V>>,
locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
/// Debug information for MIR scopes.
scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,
scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<Bx::DIScope>>,
/// If this function is being monomorphized, this contains the type substitutions used.
param_substs: &'tcx Substs<'tcx>,
}
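For orientation (not part of the diff; the alias name is invented, and it assumes the LLVM `Builder` is in scope): the LLVM backend recovers the old concrete type by instantiating `Bx` with its builder.

// Sketch only: the monomorphic form used by the LLVM backend.
type LlvmFunctionCx<'a, 'll, 'tcx> = FunctionCx<'a, 'tcx, Builder<'a, 'll, 'tcx>>;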
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn monomorphize<T>(&self, value: &T) -> T
where T: TypeFoldable<'tcx>
{
self.cx.tcx.subst_and_normalize_erasing_regions(
self.cx.tcx().subst_and_normalize_erasing_regions(
self.param_substs,
ty::ParamEnv::reveal_all(),
value,
......@@ -120,14 +116,14 @@ pub fn monomorphize<T>(&self, value: &T) -> T
pub fn set_debug_loc(
&mut self,
bx: &Builder<'_, 'll, '_>,
bx: &Bx,
source_info: mir::SourceInfo
) {
let (scope, span) = self.debug_loc(source_info);
debuginfo::set_source_location(&self.debug_context, bx, scope, span);
bx.set_source_location(&self.debug_context, scope, span);
}
pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) {
pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) {
// Bail out if debug info emission is not enabled.
match self.debug_context {
FunctionDebugContext::DebugInfoDisabled |
......@@ -167,16 +163,17 @@ pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScop
// corresponding to span's containing source scope. If so, we need to create a DIScope
// "extension" into that file.
fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos)
-> Option<&'ll DIScope> {
-> Option<Bx::DIScope> {
let scope_metadata = self.scopes[scope_id].scope_metadata;
if pos < self.scopes[scope_id].file_start_pos ||
pos >= self.scopes[scope_id].file_end_pos {
let cm = self.cx.sess().source_map();
let sm = self.cx.sess().source_map();
let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate;
Some(debuginfo::extend_scope_to_file(self.cx,
scope_metadata.unwrap(),
&cm.lookup_char_pos(pos).file,
defining_crate))
Some(self.cx.extend_scope_to_file(
scope_metadata.unwrap(),
&sm.lookup_char_pos(pos).file,
defining_crate,
))
} else {
scope_metadata
}
......@@ -193,11 +190,11 @@ enum LocalRef<'tcx, V> {
Operand(Option<OperandRef<'tcx, V>>),
}
impl LocalRef<'tcx, &'ll Value> {
fn new_operand(
cx: &CodegenCx<'ll, 'tcx>,
impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
fn new_operand<Cx: CodegenMethods<'tcx, Value = V>>(
cx: &Cx,
layout: TyLayout<'tcx>,
) -> LocalRef<'tcx, &'ll Value> {
) -> LocalRef<'tcx, V> {
if layout.is_zst() {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
......@@ -211,18 +208,18 @@ fn new_operand(
///////////////////////////////////////////////////////////////////////////
pub fn codegen_mir(
cx: &'a CodegenCx<'ll, 'tcx>,
llfn: &'ll Value,
pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
llfn: Bx::Value,
mir: &'a Mir<'tcx>,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
) {
let fn_ty = FnType::new(cx, sig, &[]);
let fn_ty = cx.new_fn_type(sig, &[]);
debug!("fn_ty: {:?}", fn_ty);
let debug_context =
debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir);
let bx = Builder::new_block(cx, llfn, "start");
cx.create_function_debug_context(instance, sig, llfn, mir);
let bx = Bx::new_block(cx, llfn, "start");
if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
bx.set_personality_fn(cx.eh_personality());
......@@ -232,7 +229,7 @@ pub fn codegen_mir(
// Allocate a `Block` for every basic block, except
// the start block, if nothing loops back to it.
let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
let block_bxs: IndexVec<mir::BasicBlock, &'ll BasicBlock> =
let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> =
mir.basic_blocks().indices().map(|bb| {
if bb == mir::START_BLOCK && !reentrant_start_block {
bx.llbb()
......@@ -242,7 +239,7 @@ pub fn codegen_mir(
}).collect();
// Compute debuginfo scopes from MIR scopes.
let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context);
let scopes = cx.create_mir_scopes(mir, &debug_context);
let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs);
let mut fx = FunctionCx {
......@@ -256,7 +253,7 @@ pub fn codegen_mir(
unreachable_block: None,
cleanup_kinds,
landing_pads,
funclets: &funclets,
funclets,
scopes,
locals: IndexVec::new(),
debug_context,
......@@ -272,7 +269,7 @@ pub fn codegen_mir(
fx.locals = {
let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals);
let mut allocate_local = |local| {
let allocate_local = |local| {
let decl = &mir.local_decls[local];
let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
assert!(!layout.ty.has_erasable_regions());
......@@ -280,7 +277,8 @@ pub fn codegen_mir(
if let Some(name) = decl.name {
// User variable
let debug_scope = fx.scopes[decl.visibility_scope];
let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full;
let dbg = debug_scope.is_valid() &&
bx.cx().sess().opts.debuginfo == DebugInfo::Full;
if !memory_locals.contains(local) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
......@@ -300,7 +298,7 @@ pub fn codegen_mir(
span: decl.source_info.span,
scope: decl.visibility_scope,
});
declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(),
bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(),
VariableAccess::DirectVariable { alloca: place.llval },
VariableKind::LocalVariable, span);
}
......@@ -310,7 +308,7 @@ pub fn codegen_mir(
// Temporary or return place
if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = llvm::get_param(llfn, 0);
let llretptr = fx.cx.get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
} else if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
......@@ -363,24 +361,22 @@ pub fn codegen_mir(
// Unreachable block
if !visited.contains(bb.index()) {
debug!("codegen_mir: block {:?} was not visited", bb);
unsafe {
llvm::LLVMDeleteBasicBlock(fx.blocks[bb]);
}
bx.delete_basic_block(fx.blocks[bb]);
}
}
}
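To make the change concrete (a sketch, not in the commit; `codegen_mir_llvm` is a made-up wrapper, and it assumes the LLVM `CodegenCx`, `Builder`, and `Value` types are in scope): a backend now selects itself purely through the type parameter.

// Sketch only: instantiating the generic entry point for the LLVM backend.
fn codegen_mir_llvm<'a, 'll, 'tcx>(
    cx: &'a CodegenCx<'ll, 'tcx>,
    llfn: &'ll Value,
    mir: &'a Mir<'tcx>,
    instance: Instance<'tcx>,
    sig: ty::FnSig<'tcx>,
) {
    codegen_mir::<Builder<'a, 'll, 'tcx>>(cx, llfn, mir, instance, sig);
}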
fn create_funclets(
fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
mir: &'a Mir<'tcx>,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
block_bxs: &IndexVec<mir::BasicBlock, &'ll BasicBlock>)
-> (IndexVec<mir::BasicBlock, Option<&'ll BasicBlock>>,
IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>)
block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>)
-> (IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
IndexVec<mir::BasicBlock, Option<Funclet<'static, Bx::Value>>>)
{
block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
match *cleanup_kind {
CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {}
_ => return (None, None)
}
......@@ -439,12 +435,15 @@ fn create_funclets(
/// Produce, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs(
bx: &Builder<'a, 'll, 'tcx>,
fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>,
scopes: &IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,
fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
fx: &FunctionCx<'a, 'tcx, Bx>,
scopes: &IndexVec<
mir::SourceScope,
debuginfo::MirDebugScope<Bx::DIScope>
>,
memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, &'ll Value>> {
) -> Vec<LocalRef<'tcx, Bx::Value>> {
let mir = fx.mir;
let tcx = bx.tcx();
let mut idx = 0;
......@@ -452,7 +451,7 @@ fn arg_local_refs(
// Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE];
let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full {
let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full {
arg_scope.scope_metadata
} else {
None
......@@ -486,7 +485,7 @@ fn arg_local_refs(
if arg.pad.is_some() {
llarg_idx += 1;
}
arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i));
bx.store_fn_arg(arg, &mut llarg_idx, place.project_field(bx, i));
}
// Now that we have one alloca that contains the aggregate value,
......@@ -495,8 +494,7 @@ fn arg_local_refs(
let variable_access = VariableAccess::DirectVariable {
alloca: place.llval
};
declare_local(
bx,
bx.declare_local(
&fx.debug_context,
arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg_ty, scope,
......@@ -525,18 +523,18 @@ fn arg_local_refs(
return local(OperandRef::new_zst(bx.cx(), arg.layout));
}
PassMode::Direct(_) => {
let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(llarg, &name);
llarg_idx += 1;
return local(
OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
}
PassMode::Pair(..) => {
let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(a, &(name.clone() + ".0"));
llarg_idx += 1;
let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(b, &(name + ".1"));
llarg_idx += 1;
......@@ -553,16 +551,16 @@ fn arg_local_refs(
// Don't copy an indirect argument to an alloca, the caller
// already put it in a temporary alloca and gave it up.
// FIXME: lifetimes
let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(llarg, &name);
llarg_idx += 1;
PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
} else if arg.is_unsized_indirect() {
// As the storage for the indirect argument lives during
// the whole function call, we just copy the fat pointer.
let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
let indirect_operand = OperandValue::Pair(llarg, llextra);
......@@ -571,7 +569,7 @@ fn arg_local_refs(
tmp
} else {
let tmp = PlaceRef::alloca(bx, arg.layout, &name);
arg.store_fn_arg(bx, &mut llarg_idx, tmp);
bx.store_fn_arg(arg, &mut llarg_idx, tmp);
tmp
};
arg_scope.map(|scope| {
......@@ -585,8 +583,7 @@ fn arg_local_refs(
alloca: place.llval
};
declare_local(
bx,
bx.declare_local(
&fx.debug_context,
arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg.layout.ty,
......@@ -658,8 +655,7 @@ fn arg_local_refs(
alloca: env_ptr,
address_operations: &ops
};
declare_local(
bx,
bx.declare_local(
&fx.debug_context,
decl.debug_name,
ty,
......@@ -680,7 +676,7 @@ fn arg_local_refs(
mod analyze;
mod block;
mod constant;
pub mod constant;
pub mod place;
pub mod operand;
mod rvalue;
......
......@@ -14,10 +14,7 @@
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use base;
use common::CodegenCx;
use builder::{Builder, MemFlags};
use value::Value;
use type_of::LayoutLlvmExt;
use builder::MemFlags;
use glue;
use interfaces::*;
......@@ -25,7 +22,6 @@
use std::fmt;
use super::{FunctionCx, LocalRef};
use super::constant::scalar_to_llvm;
use super::place::PlaceRef;
/// The representation of a Rust value. The enum variant is in fact
......@@ -61,13 +57,13 @@ pub struct OperandRef<'tcx, V> {
pub layout: TyLayout<'tcx>,
}
impl fmt::Debug for OperandRef<'tcx, &'ll Value> {
impl<V: CodegenObject> fmt::Debug for OperandRef<'tcx, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
}
}
impl<'tcx, V: CodegenObject> OperandRef<'tcx, V> {
impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
pub fn new_zst<Cx: CodegenMethods<'tcx, Value = V>>(
cx: &Cx,
layout: TyLayout<'tcx>
......@@ -78,12 +74,11 @@ pub fn new_zst<Cx: CodegenMethods<'tcx, Value = V>>(
layout
}
}
}
impl OperandRef<'tcx, &'ll Value> {
pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
val: &'tcx ty::Const<'tcx>)
-> Result<OperandRef<'tcx, &'ll Value>, ErrorHandled> {
pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
val: &'tcx ty::Const<'tcx>
) -> Result<Self, ErrorHandled> {
let layout = bx.cx().layout_of(val.ty);
if layout.is_zst() {
......@@ -97,11 +92,10 @@ pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
layout::Abi::Scalar(ref x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
};
let llval = scalar_to_llvm(
bx.cx(),
let llval = bx.cx().scalar_to_backend(
x,
scalar,
layout.immediate_llvm_type(bx.cx()),
bx.cx().immediate_backend_type(layout),
);
OperandValue::Immediate(llval)
},
......@@ -110,23 +104,20 @@ pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
layout::Abi::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout)
};
let a_llval = scalar_to_llvm(
bx.cx(),
let a_llval = bx.cx().scalar_to_backend(
a,
a_scalar,
layout.scalar_pair_element_llvm_type(bx.cx(), 0, true),
bx.cx().scalar_pair_element_backend_type(layout, 0, true),
);
let b_layout = layout.scalar_pair_element_llvm_type(bx.cx(), 1, true);
let b_llval = scalar_to_llvm(
bx.cx(),
let b_llval = bx.cx().scalar_to_backend(
b,
b_scalar,
b_layout,
bx.cx().scalar_pair_element_backend_type(layout, 1, true),
);
OperandValue::Pair(a_llval, b_llval)
},
ConstValue::ByRef(_, alloc, offset) => {
return Ok(bx.load_operand(PlaceRef::from_const_alloc(bx, layout, alloc, offset)));
return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, offset)));
},
};
......@@ -138,14 +129,17 @@ pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
/// Asserts that this operand refers to a scalar and returns
/// a reference to its value.
pub fn immediate(self) -> &'ll Value {
pub fn immediate(self) -> V {
match self.val {
OperandValue::Immediate(s) => s,
_ => bug!("not immediate: {:?}", self)
}
}
pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'tcx, &'ll Value> {
pub fn deref<Cx: CodegenMethods<'tcx, Value = V>>(
self,
cx: &Cx
) -> PlaceRef<'tcx, V> {
let projected_ty = self.layout.ty.builtin_deref(true)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
let (llptr, llextra) = match self.val {
......@@ -164,9 +158,12 @@ pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'tcx, &'ll Value> {
/// If this operand is a `Pair`, we return an aggregate with the two values.
/// For other cases, see `immediate`.
pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value {
pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx
) -> V {
if let OperandValue::Pair(a, b) = self.val {
let llty = self.layout.llvm_type(bx.cx());
let llty = bx.cx().backend_type(self.layout);
debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
self, llty);
// Reconstruct the immediate aggregate.
......@@ -180,10 +177,11 @@ pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value
}
/// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>,
llval: &'ll Value,
layout: TyLayout<'tcx>)
-> OperandRef<'tcx, &'ll Value> {
pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
llval: V,
layout: TyLayout<'tcx>
) -> Self {
let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
llval, layout);
......@@ -198,11 +196,11 @@ pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>,
OperandRef { val, layout }
}
pub fn extract_field(
pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Builder<'a, 'll, 'tcx>,
i: usize,
) -> OperandRef<'tcx, &'ll Value> {
bx: &Bx,
i: usize
) -> Self {
let field = self.layout.field(bx.cx(), i);
let offset = self.layout.fields.offset(i);
......@@ -244,11 +242,11 @@ pub fn extract_field(
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
match val {
OperandValue::Immediate(ref mut llval) => {
*llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx()));
*llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
}
OperandValue::Pair(ref mut a, ref mut b) => {
*a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx(), 0, true));
*b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx(), 1, true));
*a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
*b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
}
OperandValue::Ref(..) => bug!()
}
......@@ -264,46 +262,39 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
dest: PlaceRef<'tcx, Bx::Value>
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::empty());
}
}
impl OperandValue<&'ll Value> {
pub fn volatile_store(
pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx, &'ll Value>
bx: &Bx,
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE);
}
pub fn unaligned_volatile_store(
pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx, &'ll Value>,
bx: &Bx,
dest: PlaceRef<'tcx, V>,
) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
}
}
impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> {
pub fn nontemporal_store(
pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx, &'ll Value>
bx: &Bx,
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
}
}
impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
dest: PlaceRef<'tcx, Bx::Value>,
dest: PlaceRef<'tcx, V>,
flags: MemFlags,
) {
debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
......@@ -333,13 +324,10 @@ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
}
}
}
}
impl OperandValue<&'ll Value> {
pub fn store_unsized(
pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
indirect_dest: PlaceRef<'tcx, &'ll Value>
bx: &Bx,
indirect_dest: PlaceRef<'tcx, V>
) {
debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
let flags = MemFlags::empty();
......@@ -370,12 +358,12 @@ pub fn store_unsized(
}
}
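One more sketch (not from the diff; `store_pair_volatile` is invented): folding the old LLVM-only impls into the generic one means every flag variant now shares the single `store_with_flags` path.

// Sketch only: any builder can issue a volatile store of a scalar pair.
fn store_pair_volatile<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &Bx,
    a: Bx::Value,
    b: Bx::Value,
    dest: PlaceRef<'tcx, Bx::Value>,
) {
    OperandValue::Pair(a, b).volatile_store(bx, dest);
}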
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
fn maybe_codegen_consume_direct(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> Option<OperandRef<'tcx, &'ll Value>>
{
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn maybe_codegen_consume_direct(
&mut self,
bx: &Bx,
place: &mir::Place<'tcx>
) -> Option<OperandRef<'tcx, Bx::Value>> {
debug!("maybe_codegen_consume_direct(place={:?})", place);
// watch out for locals that do not have an
......@@ -419,11 +407,11 @@ fn maybe_codegen_consume_direct(&mut self,
None
}
pub fn codegen_consume(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> OperandRef<'tcx, &'ll Value>
{
pub fn codegen_consume(
&mut self,
bx: &Bx,
place: &mir::Place<'tcx>
) -> OperandRef<'tcx, Bx::Value> {
debug!("codegen_consume(place={:?})", place);
let ty = self.monomorphized_place_ty(place);
......@@ -443,11 +431,11 @@ pub fn codegen_consume(&mut self,
bx.load_operand(self.codegen_place(bx, place))
}
pub fn codegen_operand(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx, &'ll Value>
{
pub fn codegen_operand(
&mut self,
bx: &Bx,
operand: &mir::Operand<'tcx>
) -> OperandRef<'tcx, Bx::Value> {
debug!("codegen_operand(operand={:?})", operand);
match *operand {
......@@ -475,7 +463,7 @@ pub fn codegen_operand(&mut self,
// We've errored, so we don't have to produce working code.
let layout = bx.cx().layout_of(ty);
bx.load_operand(PlaceRef::new_sized(
bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))),
bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
layout,
layout.align,
))
......
......@@ -8,17 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::LLVMConstInBoundsGEP;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use builder::{Builder, MemFlags};
use common::{CodegenCx, IntPredicate};
use builder::MemFlags;
use common::IntPredicate;
use type_of::LayoutLlvmExt;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;
use interfaces::*;
......@@ -40,12 +37,12 @@ pub struct PlaceRef<'tcx, V> {
pub align: Align,
}
impl PlaceRef<'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn new_sized(
llval: &'ll Value,
llval: V,
layout: TyLayout<'tcx>,
align: Align,
) -> PlaceRef<'tcx, &'ll Value> {
) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
PlaceRef {
llval,
......@@ -55,46 +52,34 @@ pub fn new_sized(
}
}
pub fn from_const_alloc(
bx: &Builder<'a, 'll, 'tcx>,
pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
layout: TyLayout<'tcx>,
alloc: &mir::interpret::Allocation,
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
let init = const_alloc_to_llvm(bx.cx(), alloc);
let base_addr = bx.cx().static_addr_of(init, layout.align, None);
let llval = unsafe { LLVMConstInBoundsGEP(
bx.cx().static_bitcast(base_addr, bx.cx().type_i8p()),
&bx.cx().const_usize(offset.bytes()),
1,
)};
let llval = bx.cx().static_bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx())));
PlaceRef::new_sized(llval, layout, alloc.align)
}
pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
-> PlaceRef<'tcx, &'ll Value> {
name: &str
) -> Self {
debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align);
Self::new_sized(tmp, layout, layout.align)
}
/// Returns a place for an indirect reference to an unsized place.
pub fn alloca_unsized_indirect(
bx: &Builder<'a, 'll, 'tcx>,
pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
layout: TyLayout<'tcx>,
name: &str,
) -> PlaceRef<'tcx, &'ll Value> {
) -> Self {
debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
let ptr_layout = bx.cx().layout_of(ptr_ty);
Self::alloca(bx, ptr_layout, name)
}
pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
&self,
cx: &Cx
) -> V {
if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
if self.layout.is_unsized() {
assert_eq!(count, 0);
......@@ -114,7 +99,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self, bx: &Bx,
ix: usize,
) -> PlaceRef<'tcx, Bx::Value> {
) -> Self {
let cx = bx.cx();
let field = self.layout.field(cx, ix);
let offset = self.layout.fields.offset(ix);
......@@ -216,17 +201,14 @@ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
align: effective_field_align,
}
}
}
impl PlaceRef<'tcx, &'ll Value> {
/// Obtain the actual discriminant of a value.
pub fn codegen_get_discr(
pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
cast_to: Ty<'tcx>
) -> &'ll Value {
let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
) -> V {
let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
if self.layout.abi.is_uninhabited() {
return bx.cx().const_undef(cast_to);
}
......@@ -234,7 +216,7 @@ pub fn codegen_get_discr(
layout::Variants::Single { index } => {
let discr_val = self.layout.ty.ty_adt_def().map_or(
index.as_u32() as u128,
|def| def.discriminant_for_variant(bx.cx().tcx, index).val);
|def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
return bx.cx().const_uint_big(cast_to, discr_val);
}
layout::Variants::Tagged { .. } |
......@@ -262,7 +244,7 @@ pub fn codegen_get_discr(
niche_start,
..
} => {
let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
let niche_llty = bx.cx().immediate_backend_type(discr.layout);
if niche_variants.start() == niche_variants.end() {
// FIXME(eddyb) Check the actual primitive type here.
let niche_llval = if niche_start == 0 {
......@@ -290,7 +272,11 @@ pub fn codegen_get_discr(
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
variant_index: VariantIdx
) {
if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
return;
}
......@@ -304,7 +290,7 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
.discriminant_for_variant(bx.tcx(), variant_index)
.val;
bx.store(
bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
ptr.llval,
ptr.align);
}
......@@ -315,8 +301,8 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
..
} => {
if variant_index != dataful_variant {
if bx.sess().target.target.arch == "arm" ||
bx.sess().target.target.arch == "aarch64" {
if bx.cx().sess().target.target.arch == "arm" ||
bx.cx().sess().target.target.arch == "aarch64" {
// Issue #34427: As workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let fill_byte = bx.cx().const_u8(0);
......@@ -326,7 +312,7 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
}
let niche = self.project_field(bx, 0);
let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = (niche_value as u128)
.wrapping_add(niche_start);
......@@ -343,8 +329,11 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
}
}
pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
-> PlaceRef<'tcx, &'ll Value> {
pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
llindex: V
) -> Self {
PlaceRef {
llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
llextra: None,
......@@ -353,36 +342,40 @@ pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
}
}
pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
-> PlaceRef<'tcx, &'ll Value> {
pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
variant_index: VariantIdx
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
// Cast to the appropriate variant struct type.
let variant_ty = downcast.layout.llvm_type(bx.cx());
let variant_ty = bx.cx().backend_type(downcast.layout);
downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
downcast
}
pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
bx.lifetime_start(self.llval, self.layout.size);
}
pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
bx.lifetime_end(self.llval, self.layout.size);
}
}
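A closing sketch for this file (not in the commit; `scoped_temp` is a made-up name): the whole temporary-lifecycle dance — allocate, mark live, later mark dead — is now expressible over any backend.

// Sketch only: allocate a temporary and mark its storage live.
fn scoped_temp<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &Bx,
    layout: TyLayout<'tcx>,
) -> PlaceRef<'tcx, Bx::Value> {
    let tmp = PlaceRef::alloca(bx, layout, "scratch");
    tmp.storage_live(bx);
    tmp
}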
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_place(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> PlaceRef<'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_place(
&mut self,
bx: &Bx,
place: &mir::Place<'tcx>
) -> PlaceRef<'tcx, Bx::Value> {
debug!("codegen_place(place={:?})", place);
let cx = bx.cx();
let tcx = cx.tcx;
let tcx = cx.tcx();
if let mir::Place::Local(index) = *place {
match self.locals[index] {
......@@ -390,7 +383,7 @@ pub fn codegen_place(&mut self,
return place;
}
LocalRef::UnsizedPlace(place) => {
return bx.load_operand(place).deref(&cx);
return bx.load_operand(place).deref(cx);
}
LocalRef::Operand(..) => {
bug!("using operand local {:?} as place", place);
......@@ -410,7 +403,7 @@ pub fn codegen_place(&mut self,
match bx.tcx().const_eval(param_env.and(cid)) {
Ok(val) => match val.val {
mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
PlaceRef::from_const_alloc(bx, layout, alloc, offset)
bx.cx().from_const_alloc(layout, alloc, offset)
}
_ => bug!("promoteds should have an allocation: {:?}", val),
},
......@@ -422,7 +415,7 @@ pub fn codegen_place(&mut self,
let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
bx.call(fnname, &[], None);
let llval = bx.cx().const_undef(
bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))
bx.cx().type_ptr_to(bx.cx().backend_type(layout))
);
PlaceRef::new_sized(llval, layout, layout.align)
}
......@@ -471,8 +464,7 @@ pub fn codegen_place(&mut self,
let mut subslice = cg_base.project_index(bx,
bx.cx().const_usize(from as u64));
let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
.projection_ty(tcx, &projection.elem)
.to_ty(bx.tcx());
.projection_ty(tcx, &projection.elem).to_ty(tcx);
subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
if subslice.layout.is_unsized() {
......@@ -483,7 +475,7 @@ pub fn codegen_place(&mut self,
// Cast the place pointer type to the new
// array or slice type (*[%_; new_len]).
subslice.llval = bx.pointercast(subslice.llval,
bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx())));
bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
subslice
}
......@@ -498,7 +490,7 @@ pub fn codegen_place(&mut self,
}
pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx;
let tcx = self.cx.tcx();
let place_ty = place.ty(self.mir, tcx);
self.monomorphize(&place_ty.to_ty(tcx))
}
......
......@@ -10,20 +10,18 @@
use rustc::mir;
use asm;
use builder::Builder;
use interfaces::BuilderMethods;
use super::FunctionCx;
use super::LocalRef;
use super::OperandValue;
use value::Value;
use interfaces::*;
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_statement(&mut self,
bx: Builder<'a, 'll, 'tcx>,
statement: &mir::Statement<'tcx>)
-> Builder<'a, 'll, 'tcx> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_statement(
&mut self,
bx: Bx,
statement: &mir::Statement<'tcx>
) -> Bx {
debug!("codegen_statement(statement={:?})", statement);
self.set_debug_loc(&bx, statement.source_info);
......@@ -91,16 +89,16 @@ pub fn codegen_statement(&mut self,
if let OperandValue::Immediate(_) = op.val {
acc.push(op.immediate());
} else {
span_err!(bx.sess(), span.to_owned(), E0669,
span_err!(bx.cx().sess(), span.to_owned(), E0669,
"invalid value for constraint in inline assembly");
}
acc
});
if input_vals.len() == inputs.len() {
let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals);
let res = bx.codegen_inline_asm(asm, outputs, input_vals);
if !res {
span_err!(bx.sess(), statement.source_info.span, E0668,
span_err!(bx.cx().sess(), statement.source_info.span, E0668,
"malformed inline assembly");
}
}
......
......@@ -14,11 +14,9 @@
//! item-path. This is used for unit testing the code that generates
//! paths etc in all kinds of annoying scenarios.
use asm;
use attributes;
use base;
use context::CodegenCx;
use declare;
use llvm;
use monomorphize::Instance;
use type_of::LayoutLlvmExt;
......@@ -29,7 +27,7 @@
use rustc::ty::TypeFoldable;
use rustc::ty::layout::LayoutOf;
use std::fmt;
use interfaces::StaticMethods;
use interfaces::*;
pub use rustc::mir::mono::MonoItem;
......@@ -59,7 +57,7 @@ fn define(&self, cx: &CodegenCx<'a, 'tcx>) {
MonoItem::GlobalAsm(node_id) => {
let item = cx.tcx.hir.expect_item(node_id);
if let hir::ItemKind::GlobalAsm(ref ga) = item.node {
asm::codegen_global_asm(cx, ga);
cx.codegen_global_asm(ga);
} else {
span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
}
......@@ -132,7 +130,7 @@ fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let ty = instance.ty(cx.tcx);
let llty = cx.layout_of(ty).llvm_type(cx);
let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| {
let g = cx.define_global(symbol_name, llty).unwrap_or_else(|| {
cx.sess().span_fatal(cx.tcx.def_span(def_id),
&format!("symbol `{}` is already defined", symbol_name))
});
......@@ -155,7 +153,7 @@ fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let mono_sig = instance.fn_sig(cx.tcx);
let attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
let lldecl = declare::declare_fn(cx, symbol_name, mono_sig);
let lldecl = cx.declare_fn(symbol_name, mono_sig);
unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
base::set_link_section(lldecl, &attrs);
if linkage == Linkage::LinkOnceODR ||
......
......@@ -24,9 +24,11 @@
use rustc::util::nodemap::FxHashMap;
use rustc::ty::{self, Ty};
use rustc::ty::layout::TyLayout;
use rustc_target::abi::call::{CastTarget, FnType, Reg};
use rustc_data_structures::small_c_str::SmallCStr;
use common::{self, TypeKind};
use type_of::LayoutLlvmExt;
use abi::{LlvmType, FnTypeExt};
use std::fmt;
use std::cell::RefCell;
......@@ -395,7 +397,7 @@ fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type {
layout.llvm_type(&self)
layout.llvm_type(self)
}
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type {
layout.immediate_llvm_type(self)
......@@ -411,4 +413,16 @@ fn scalar_pair_element_backend_type<'a>(
) -> &'ll Type {
layout.scalar_pair_element_llvm_type(self, index, immediate)
}
fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
ty.llvm_type(self)
}
fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.llvm_type(self)
}
fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.ptr_to_llvm_type(self)
}
fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
ty.llvm_type(self)
}
}
......@@ -16,7 +16,7 @@
use rustc_target::abi::FloatTy;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use type_::Type;
use interfaces::{BaseTypeMethods, DerivedTypeMethods};
use interfaces::*;
use std::fmt::Write;
......@@ -266,7 +266,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
ty::ParamEnv::reveal_all(),
&sig,
);
FnType::new(cx, sig, &[]).ptr_to_llvm_type(cx)
cx.fn_ptr_backend_type(&FnType::new(cx, sig, &[]))
}
_ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO)
};
......