diff --git a/src/Cargo.lock b/src/Cargo.lock index a0bb92867ff4e65a4cb48dd43df49a2c9394c51d..8b7f3591a2505fc6ce463c3fe68c487fb851f8eb 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2122,9 +2122,20 @@ dependencies = [ "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_codegen_ssa 0.0.0", "rustc_llvm 0.0.0", ] +[[package]] +name = "rustc_codegen_ssa" +version = "0.0.0" +dependencies = [ + "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rustc_codegen_utils" version = "0.0.0" @@ -2132,13 +2143,11 @@ dependencies = [ "flate2 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc_allocator 0.0.0", "rustc_data_structures 0.0.0", "rustc_incremental 0.0.0", "rustc_metadata 0.0.0", "rustc_mir 0.0.0", "rustc_target 0.0.0", - "serialize 0.0.0", "syntax 0.0.0", "syntax_pos 0.0.0", ] diff --git a/src/librustc_codegen_llvm/Cargo.toml b/src/librustc_codegen_llvm/Cargo.toml index b711502b14b7fb1af4222d0859fbf82a03e75d06..34017009c2838eae75707709b2e53e02dc2cc58e 100644 --- a/src/librustc_codegen_llvm/Cargo.toml +++ b/src/librustc_codegen_llvm/Cargo.toml @@ -13,6 +13,7 @@ test = false cc = "1.0.1" num_cpus = "1.0" rustc-demangle = "0.1.4" +rustc_codegen_ssa = { path = "../librustc_codegen_ssa" } rustc_llvm = { path = "../librustc_llvm" } memmap = "0.6" diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 03b0b04d4014694f5571b97745f9cffd48504ef3..76fc5a6eeec7f7f40d857f159a80a93e11bddec6 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -9,18 +9,20 @@ // except according to those terms. 
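Note on the abi.rs hunks below: together with the new `rustc_codegen_ssa` dependency added above, they replace free type constructors such as `Type::ix(cx, ..)` with methods on the codegen context (`cx.type_ix(..)`) brought in via `rustc_codegen_ssa::traits::*`. A minimal, self-contained sketch of that pattern follows; the trait and type names (`BaseTypeMethods`, `ToyCx`, `ToyType`, `reg_type`) are illustrative stand-ins, not the actual rustc_codegen_ssa API.

```rust
// Illustrative sketch only: the real traits live in rustc_codegen_ssa::traits and
// their exact names and signatures may differ. This just shows the shape of the
// "type constructors as context methods" pattern used throughout these hunks.
trait BaseTypeMethods {
    /// The backend's representation of a codegen-level type.
    type Type: Copy;

    fn type_i8(&self) -> Self::Type;
    fn type_ix(&self, num_bits: u64) -> Self::Type;
    fn type_f32(&self) -> Self::Type;
    fn type_f64(&self) -> Self::Type;
    fn type_vector(&self, elem: Self::Type, len: u64) -> Self::Type;
}

// Toy context standing in for CodegenCx, so callers can be written generically
// against the trait instead of against `&'ll llvm::Type` directly.
struct ToyCx;

#[derive(Copy, Clone, Debug)]
enum ToyType {
    Int(u64),         // an iN integer type
    F32,
    F64,
    Vector(u64, u64), // (element bits, lane count)
}

impl BaseTypeMethods for ToyCx {
    type Type = ToyType;
    fn type_i8(&self) -> ToyType { ToyType::Int(8) }
    fn type_ix(&self, num_bits: u64) -> ToyType { ToyType::Int(num_bits) }
    fn type_f32(&self) -> ToyType { ToyType::F32 }
    fn type_f64(&self) -> ToyType { ToyType::F64 }
    fn type_vector(&self, elem: ToyType, len: u64) -> ToyType {
        match elem {
            ToyType::Int(bits) => ToyType::Vector(bits, len),
            _ => panic!("only integer vectors in this sketch"),
        }
    }
}

// Generic caller, mirroring how `Reg::llvm_type` picks a type per register kind.
fn reg_type<Cx: BaseTypeMethods>(cx: &Cx, is_float: bool, bits: u64) -> Cx::Type {
    if is_float {
        match bits {
            32 => cx.type_f32(),
            64 => cx.type_f64(),
            _ => panic!("unsupported float width"),
        }
    } else {
        cx.type_ix(bits)
    }
}

fn main() {
    let cx = ToyCx;
    println!("{:?}", reg_type(&cx, false, 16));        // Int(16)
    println!("{:?}", cx.type_vector(cx.type_i8(), 8)); // Vector(8, 8)
}
```

Keeping the type constructors behind an associated `Type` is what lets the shared lowering code in rustc_codegen_ssa stay agnostic of the concrete LLVM type.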
use llvm::{self, AttributePlace}; -use base; -use builder::{Builder, MemFlags}; -use common::C_usize; +use rustc_codegen_ssa::MemFlags; +use builder::Builder; use context::CodegenCx; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; +use rustc_target::abi::call::ArgType; + +use rustc_codegen_ssa::traits::*; use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi}; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, Instance}; use rustc::ty::layout; use libc::c_uint; @@ -110,16 +112,16 @@ pub trait LlvmType { impl LlvmType for Reg { fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { match self.kind { - RegKind::Integer => Type::ix(cx, self.size.bits()), + RegKind::Integer => cx.type_ix(self.size.bits()), RegKind::Float => { match self.size.bits() { - 32 => Type::f32(cx), - 64 => Type::f64(cx), + 32 => cx.type_f32(), + 64 => cx.type_f64(), _ => bug!("unsupported float: {:?}", self) } } RegKind::Vector => { - Type::vector(Type::i8(cx), self.size.bytes()) + cx.type_vector(cx.type_i8(), self.size.bytes()) } } } @@ -143,7 +145,7 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { // Simplify to array when all chunks are the same size and type if rem_bytes == 0 { - return Type::array(rest_ll_unit, rest_count); + return cx.type_array(rest_ll_unit, rest_count); } } @@ -158,17 +160,27 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { if rem_bytes != 0 { // Only integers can be really split further. assert_eq!(self.rest.unit.kind, RegKind::Integer); - args.push(Type::ix(cx, rem_bytes * 8)); + args.push(cx.type_ix(rem_bytes * 8)); } - Type::struct_(cx, &args, false) + cx.type_struct(&args, false) } } pub trait ArgTypeExt<'ll, 'tcx> { fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>); - fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>); + fn store( + &self, + bx: &mut Builder<'_, 'll, 'tcx>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value>, + ); + fn store_fn_arg( + &self, + bx: &mut Builder<'_, 'll, 'tcx>, + idx: &mut usize, + dst: PlaceRef<'tcx, &'ll Value>, + ); } impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { @@ -182,11 +194,15 @@ fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { /// place for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) { + fn store( + &self, + bx: &mut Builder<'_, 'll, 'tcx>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value>, + ) { if self.is_ignore() { return; } - let cx = bx.cx; if self.is_sized_indirect() { OperandValue::Ref(val, None, self.layout.align).store(bx, dst) } else if self.is_unsized_indirect() { @@ -196,7 +212,8 @@ fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. 
let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to()); + let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx())); + let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); bx.store(val, cast_dst, self.layout.align); } else { // The actual return type is a struct, but the ABI @@ -214,22 +231,23 @@ fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let scratch_size = cast.size(cx); - let scratch_align = cast.align(cx); - let llscratch = bx.alloca(cast.llvm_type(cx), "abi_cast", scratch_align); + let scratch_size = cast.size(bx.cx()); + let scratch_align = cast.align(bx.cx()); + let llscratch = bx.alloca(cast.llvm_type(bx.cx()), "abi_cast", scratch_align); bx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... bx.store(val, llscratch, scratch_align); // ...and then memcpy it to the intended destination. - base::call_memcpy(bx, - bx.pointercast(dst.llval, Type::i8p(cx)), - self.layout.align, - bx.pointercast(llscratch, Type::i8p(cx)), - scratch_align, - C_usize(cx, self.layout.size.bytes()), - MemFlags::empty()); + bx.memcpy( + dst.llval, + self.layout.align, + llscratch, + scratch_align, + bx.cx().const_usize(self.layout.size.bytes()), + MemFlags::empty() + ); bx.lifetime_end(llscratch, scratch_size); } @@ -238,7 +256,12 @@ fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, } } - fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) { + fn store_fn_arg( + &self, + bx: &mut Builder<'a, 'll, 'tcx>, + idx: &mut usize, + dst: PlaceRef<'tcx, &'ll Value>, + ) { let mut next = || { let val = llvm::get_param(bx.llfn(), *idx as c_uint); *idx += 1; @@ -259,6 +282,27 @@ fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceR } } +impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn store_fn_arg( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + idx: &mut usize, dst: PlaceRef<'tcx, Self::Value> + ) { + ty.store_fn_arg(self, idx, dst) + } + fn store_arg_ty( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ) { + ty.store(self, val, dst) + } + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.memory_ty(self.cx()) + } +} + pub trait FnTypeExt<'tcx> { fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self; fn new(cx: &CodegenCx<'ll, 'tcx>, @@ -280,7 +324,7 @@ fn adjust_for_abi(&mut self, fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; fn apply_attrs_llfn(&self, llfn: &'ll Value); - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value); + fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value); } impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { @@ -614,14 +658,14 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { ); let llreturn_ty = match self.ret.mode { - PassMode::Ignore => Type::void(cx), + PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) => { self.ret.layout.immediate_llvm_type(cx) } PassMode::Cast(cast) => cast.llvm_type(cx), PassMode::Indirect(..) 
=> { - llargument_tys.push(self.ret.memory_ty(cx).ptr_to()); - Type::void(cx) + llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + cx.type_void() } }; @@ -647,15 +691,15 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(), + PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)), }; llargument_tys.push(llarg_ty); } if self.variadic { - Type::variadic_func(&llargument_tys, llreturn_ty) + cx.type_variadic_func(&llargument_tys, llreturn_ty) } else { - Type::func(&llargument_tys, llreturn_ty) + cx.type_func(&llargument_tys, llreturn_ty) } } @@ -717,7 +761,7 @@ fn apply_attrs_llfn(&self, llfn: &'ll Value) { } } - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { + fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { let mut i = 0; let mut apply = |attrs: &ArgAttributes| { attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); @@ -736,7 +780,7 @@ fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value // by the LLVM verifier. if let layout::Int(..) = scalar.value { if !scalar.is_bool() { - let range = scalar.valid_range_exclusive(bx.cx); + let range = scalar.valid_range_exclusive(bx.cx()); if range.start != range.end { bx.range_metadata(callsite, range); } @@ -769,3 +813,29 @@ fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value } } } + +impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> { + FnType::new(&self, sig, extra_args) + } + fn new_vtable( + &self, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>] + ) -> FnType<'tcx, Ty<'tcx>> { + FnType::new_vtable(&self, sig, extra_args) + } + fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> { + FnType::of_instance(&self, instance) + } +} + +impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn apply_attrs_callsite( + &mut self, + ty: &FnType<'tcx, Ty<'tcx>>, + callsite: Self::Value + ) { + ty.apply_attrs_callsite(self, callsite) + } +} diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index f1bb41bcebacf3e3f85608d9b92cff01725bee62..efbe7cad1383559c6c0bda3501c050cc60d1641e 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -9,126 +9,123 @@ // except according to those terms. 
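Note on the trailing abi.rs hunks above: the LLVM-specific extension traits (`ArgTypeExt`, `FnTypeExt`) keep their bodies, and thin forwarding impls (`ArgTypeMethods`, `AbiMethods`, `AbiBuilderMethods`) expose them through the backend-agnostic traits, with builder methods now taking `&mut self`. A small sketch of that forwarding pattern, under assumed names (`ArgAbiMethods`, `LlvmLikeBuilder`, `spill_args`):

```rust
// Sketch of the forwarding pattern, under assumed names. Shared lowering code would
// only name the trait; the body delegates to backend-local helpers, the way
// `ArgTypeMethods for Builder` above delegates to `ArgTypeExt::store_fn_arg`.
trait ArgAbiMethods {
    type Value;
    fn store_arg(&mut self, val: Self::Value, dst: &mut Vec<Self::Value>);
}

// Backend-specific builder with its own "extension" logic (stand-in for ArgTypeExt).
struct LlvmLikeBuilder {
    emitted: Vec<String>,
}

impl LlvmLikeBuilder {
    fn store_impl(&mut self, val: u32, dst: &mut Vec<u32>) {
        // A real builder would emit a store instruction here; we just log it.
        self.emitted.push(format!("store {}", val));
        dst.push(val);
    }
}

// Thin forwarding impl: note the `&mut self` receiver, matching the switch from
// `&Builder` to `&mut Builder` in the hunks above.
impl ArgAbiMethods for LlvmLikeBuilder {
    type Value = u32;
    fn store_arg(&mut self, val: u32, dst: &mut Vec<u32>) {
        self.store_impl(val, dst)
    }
}

// Backend-agnostic caller, as it might appear in shared MIR lowering code.
fn spill_args<Bx: ArgAbiMethods>(bx: &mut Bx, args: Vec<Bx::Value>) -> Vec<Bx::Value> {
    let mut slot = Vec::new();
    for a in args {
        bx.store_arg(a, &mut slot);
    }
    slot
}

fn main() {
    let mut bx = LlvmLikeBuilder { emitted: Vec::new() };
    let stored = spill_args(&mut bx, vec![1, 2, 3]);
    assert_eq!(stored, vec![1, 2, 3]);
    assert_eq!(bx.emitted.len(), 3);
}
```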
use llvm; -use common::*; -use type_::Type; +use context::CodegenCx; use type_of::LayoutLlvmExt; use builder::Builder; use value::Value; use rustc::hir; +use rustc_codegen_ssa::traits::*; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use std::ffi::CString; -use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; -// Take an inline assembly expression and splat it out via LLVM -pub fn codegen_inline_asm( - bx: &Builder<'a, 'll, 'tcx>, - ia: &hir::InlineAsm, - outputs: Vec>, - mut inputs: Vec<&'ll Value> -) -> bool { - let mut ext_constraints = vec![]; - let mut output_types = vec![]; - - // Prepare the output operands - let mut indirect_outputs = vec![]; - for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() { - if out.is_rw { - inputs.push(place.load(bx).immediate()); - ext_constraints.push(i.to_string()); + +impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn codegen_inline_asm( + &mut self, + ia: &hir::InlineAsm, + outputs: Vec>, + mut inputs: Vec<&'ll Value> + ) -> bool { + let mut ext_constraints = vec![]; + let mut output_types = vec![]; + + // Prepare the output operands + let mut indirect_outputs = vec![]; + for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() { + if out.is_rw { + inputs.push(self.load_operand(place).immediate()); + ext_constraints.push(i.to_string()); + } + if out.is_indirect { + indirect_outputs.push(self.load_operand(place).immediate()); + } else { + output_types.push(place.layout.llvm_type(self.cx())); + } } - if out.is_indirect { - indirect_outputs.push(place.load(bx).immediate()); - } else { - output_types.push(place.layout.llvm_type(bx.cx)); + if !indirect_outputs.is_empty() { + indirect_outputs.extend_from_slice(&inputs); + inputs = indirect_outputs; } - } - if !indirect_outputs.is_empty() { - indirect_outputs.extend_from_slice(&inputs); - inputs = indirect_outputs; - } - let clobbers = ia.clobbers.iter() - .map(|s| format!("~{{{}}}", &s)); - - // Default per-arch clobbers - // Basically what clang does - let arch_clobbers = match &bx.sess().target.target.arch[..] 
{ - "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], - "mips" | "mips64" => vec!["~{$1}"], - _ => Vec::new() - }; - - let all_constraints = - ia.outputs.iter().map(|out| out.constraint.to_string()) - .chain(ia.inputs.iter().map(|s| s.to_string())) - .chain(ext_constraints) - .chain(clobbers) - .chain(arch_clobbers.iter().map(|s| s.to_string())) - .collect::>().join(","); - - debug!("Asm Constraints: {}", &all_constraints); - - // Depending on how many outputs we have, the return type is different - let num_outputs = output_types.len(); - let output_type = match num_outputs { - 0 => Type::void(bx.cx), - 1 => output_types[0], - _ => Type::struct_(bx.cx, &output_types, false) - }; - - let dialect = match ia.dialect { - AsmDialect::Att => llvm::AsmDialect::Att, - AsmDialect::Intel => llvm::AsmDialect::Intel, - }; - - let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); - let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = bx.inline_asm_call( - asm.as_ptr(), - constraint_cstr.as_ptr(), - &inputs, - output_type, - ia.volatile, - ia.alignstack, - dialect - ); - if r.is_none() { - return false; - } - let r = r.unwrap(); + let clobbers = ia.clobbers.iter() + .map(|s| format!("~{{{}}}", &s)); + + // Default per-arch clobbers + // Basically what clang does + let arch_clobbers = match &self.cx().sess().target.target.arch[..] { + "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], + "mips" | "mips64" => vec!["~{$1}"], + _ => Vec::new() + }; + + let all_constraints = + ia.outputs.iter().map(|out| out.constraint.to_string()) + .chain(ia.inputs.iter().map(|s| s.to_string())) + .chain(ext_constraints) + .chain(clobbers) + .chain(arch_clobbers.iter().map(|s| s.to_string())) + .collect::>().join(","); + + debug!("Asm Constraints: {}", &all_constraints); + + // Depending on how many outputs we have, the return type is different + let num_outputs = output_types.len(); + let output_type = match num_outputs { + 0 => self.cx().type_void(), + 1 => output_types[0], + _ => self.cx().type_struct(&output_types, false) + }; + + let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); + let constraint_cstr = CString::new(all_constraints).unwrap(); + let r = self.inline_asm_call( + &asm, + &constraint_cstr, + &inputs, + output_type, + ia.volatile, + ia.alignstack, + ia.dialect + ); + if r.is_none() { + return false; + } + let r = r.unwrap(); - // Again, based on how many outputs we have - let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); - for (i, (_, &place)) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) }; - OperandValue::Immediate(v).store(bx, place); - } + // Again, based on how many outputs we have + let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); + for (i, (_, &place)) in outputs.enumerate() { + let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) }; + OperandValue::Immediate(v).store(self, place); + } - // Store mark in a metadata node so we can map LLVM errors - // back to source locations. See #17552. - unsafe { - let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx, - key.as_ptr() as *const c_char, key.len() as c_uint); + // Store mark in a metadata node so we can map LLVM errors + // back to source locations. See #17552. 
+ unsafe { + let key = "srcloc"; + let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx, + key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32); - llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1)); - } + llvm::LLVMSetMetadata(r, kind, + llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1)); + } - return true; + true + } } -pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - ga: &hir::GlobalAsm) { - let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); - unsafe { - llvm::LLVMRustAppendModuleInlineAsm(cx.llmod, asm.as_ptr()); +impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn codegen_global_asm(&self, ga: &hir::GlobalAsm) { + let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); + unsafe { + llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr()); + } } } diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index f45b3728bc1b074aae9d7018d9f978656eb13b1d..38ab1302cfa7cb5b525749a01fe30c9fedd38176 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -21,6 +21,7 @@ use rustc_data_structures::sync::Lrc; use rustc_data_structures::fx::FxHashMap; use rustc_target::spec::PanicStrategy; +use rustc_codegen_ssa::traits::*; use attributes; use llvm::{self, Attribute}; diff --git a/src/librustc_codegen_llvm/back/archive.rs b/src/librustc_codegen_llvm/back/archive.rs index 54245a36017ab9e5631040292287495957d49730..76c50711639a4a10db73931dbedb64409465390d 100644 --- a/src/librustc_codegen_llvm/back/archive.rs +++ b/src/librustc_codegen_llvm/back/archive.rs @@ -18,6 +18,7 @@ use std::str; use back::bytecode::RLIB_BYTECODE_EXTENSION; +use rustc_codegen_ssa::back::archive::find_library; use libc; use llvm::archive_ro::{ArchiveRO, Child}; use llvm::{self, ArchiveKind}; @@ -52,7 +53,6 @@ enum Addition { }, } - fn is_relevant_child(c: &Child) -> bool { match c.name() { Some(name) => !name.contains("SYMDEF"), @@ -107,7 +107,7 @@ fn src_archive(&mut self) -> Option<&ArchiveRO> { /// Adds all of the contents of a native library to this archive. This will /// search in the relevant locations for a library named `name`. pub fn add_native_library(&mut self, name: &str) { - let location = ::rustc_codegen_utils::find_library(name, &self.config.lib_search_paths, + let location = find_library(name, &self.config.lib_search_paths, self.config.sess); self.add_archive(&location, |_| false).unwrap_or_else(|e| { self.config.sess.fatal(&format!("failed to add native library {}: {}", diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index 111637b6aa967800090441756dced4a984e29572..20f05d110877a242184bbaa6a358a327ee0b21e9 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ b/src/librustc_codegen_llvm/back/link.rs @@ -9,9 +9,12 @@ // except according to those terms. 
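Note on the asm.rs hunks above: inline asm becomes a builder-level trait method (`AsmBuilderMethods::codegen_inline_asm`, taking `&mut self`), while module-level global asm becomes a context-level trait method (`AsmMethods::codegen_global_asm`, taking `&self`). The sketch below shows why the receivers differ, using invented stand-ins (`GlobalAsmMethods`, `InlineAsmBuilderMethods`, `SketchCx`, `SketchBuilder`) rather than the real traits:

```rust
use std::cell::RefCell;

// Invented stand-ins for the two traits: module-wide operations hang off the shared
// context (`&self`, interior mutability), instruction emission goes through the
// builder (`&mut self`), mirroring AsmMethods vs. AsmBuilderMethods above.
trait GlobalAsmMethods {
    fn append_module_asm(&self, asm: &str);
}

trait InlineAsmBuilderMethods {
    type Value;
    fn emit_inline_asm(&mut self, asm: &str, inputs: &[Self::Value]) -> Option<Self::Value>;
}

struct SketchCx {
    module_asm: RefCell<Vec<String>>,
}

struct SketchBuilder<'a> {
    cx: &'a SketchCx,
    insts: Vec<String>,
}

impl GlobalAsmMethods for SketchCx {
    fn append_module_asm(&self, asm: &str) {
        // Shared, context-level state: many builders can point at the same context.
        self.module_asm.borrow_mut().push(asm.to_string());
    }
}

impl<'a> InlineAsmBuilderMethods for SketchBuilder<'a> {
    type Value = usize;
    fn emit_inline_asm(&mut self, asm: &str, inputs: &[usize]) -> Option<usize> {
        // Builder-local state: appending to the instruction stream needs `&mut self`.
        self.insts.push(format!("asm {:?} {:?}", asm, inputs));
        // Pretend the asm produces one value, identified by its instruction index.
        Some(self.insts.len() - 1)
    }
}

fn main() {
    let cx = SketchCx { module_asm: RefCell::new(Vec::new()) };
    cx.append_module_asm(".globl some_symbol");

    let mut bx = SketchBuilder { cx: &cx, insts: Vec::new() };
    let v = bx.emit_inline_asm("nop", &[]);
    assert_eq!(v, Some(0));
    assert_eq!(bx.cx.module_asm.borrow().len(), 1);
}
```

The link.rs hunks that follow apply the same direction of travel: linker discovery and rlib-walking helpers are deleted here and imported from `rustc_codegen_ssa::back::link` instead.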
use back::wasm; -use cc::windows_registry; use super::archive::{ArchiveBuilder, ArchiveConfig}; use super::bytecode::RLIB_BYTECODE_EXTENSION; +use rustc_codegen_ssa::back::linker::Linker; +use rustc_codegen_ssa::back::link::{remove, ignored_for_lto, each_linked_rlib, linker_and_flavor, + get_linker}; +use rustc_codegen_ssa::back::command::Command; use super::rpath::RPathConfig; use super::rpath; use metadata::METADATA_FILENAME; @@ -20,17 +23,15 @@ use rustc::session::filesearch; use rustc::session::search_paths::PathKind; use rustc::session::Session; -use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind}; +use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind}; use rustc::middle::dependency_format::Linkage; -use {CodegenResults, CrateInfo}; +use rustc_codegen_ssa::CodegenResults; use rustc::util::common::time; use rustc_fs_util::fix_windows_verbatim_for_gcc; use rustc::hir::def_id::CrateNum; use tempfile::{Builder as TempFileBuilder, TempDir}; use rustc_target::spec::{PanicStrategy, RelroLevel, LinkerFlavor}; use rustc_data_structures::fx::FxHashSet; -use rustc_codegen_utils::linker::Linker; -use rustc_codegen_utils::command::Command; use context::get_reloc_model; use llvm; @@ -50,69 +51,6 @@ invalid_output_for_target, filename_for_metadata, out_filename, check_file_is_writeable}; -// The third parameter is for env vars, used on windows to set up the -// path for MSVC to find its DLLs, and gcc to find its bundled -// toolchain -pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { - let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); - - // If our linker looks like a batch script on Windows then to execute this - // we'll need to spawn `cmd` explicitly. This is primarily done to handle - // emscripten where the linker is `emcc.bat` and needs to be spawned as - // `cmd /c emcc.bat ...`. - // - // This worked historically but is needed manually since #42436 (regression - // was tagged as #42791) and some more info can be found on #44443 for - // emscripten itself. - let mut cmd = match linker.to_str() { - Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), - _ => match flavor { - LinkerFlavor::Lld(f) => Command::lld(linker, f), - LinkerFlavor::Msvc - if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => - { - Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) - }, - _ => Command::new(linker), - } - }; - - // The compiler's sysroot often has some bundled tools, so add it to the - // PATH for the child. - let mut new_path = sess.host_filesearch(PathKind::All) - .get_tools_search_paths(); - let mut msvc_changed_path = false; - if sess.target.target.options.is_like_msvc { - if let Some(ref tool) = msvc_tool { - cmd.args(tool.args()); - for &(ref k, ref v) in tool.env() { - if k == "PATH" { - new_path.extend(env::split_paths(v)); - msvc_changed_path = true; - } else { - cmd.env(k, v); - } - } - } - } - - if !msvc_changed_path { - if let Some(path) = env::var_os("PATH") { - new_path.extend(env::split_paths(&path)); - } - } - cmd.env("PATH", env::join_paths(new_path).unwrap()); - - (linker.to_path_buf(), cmd) -} - -pub fn remove(sess: &Session, path: &Path) { - if let Err(e) = fs::remove_file(path) { - sess.err(&format!("failed to remove {}: {}", - path.display(), - e)); - } -} /// Perform the linkage portion of the compilation phase. 
This will generate all /// of the requested outputs for this compilation session. @@ -214,60 +152,6 @@ fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool { false } -pub(crate) fn each_linked_rlib(sess: &Session, - info: &CrateInfo, - f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { - let crates = info.used_crates_static.iter(); - let fmts = sess.dependency_formats.borrow(); - let fmts = fmts.get(&config::CrateType::Executable) - .or_else(|| fmts.get(&config::CrateType::Staticlib)) - .or_else(|| fmts.get(&config::CrateType::Cdylib)) - .or_else(|| fmts.get(&config::CrateType::ProcMacro)); - let fmts = match fmts { - Some(f) => f, - None => return Err("could not find formats for rlibs".to_string()) - }; - for &(cnum, ref path) in crates { - match fmts.get(cnum.as_usize() - 1) { - Some(&Linkage::NotLinked) | - Some(&Linkage::IncludedFromDylib) => continue, - Some(_) => {} - None => return Err("could not find formats for rlibs".to_string()) - } - let name = &info.crate_name[&cnum]; - let path = match *path { - LibSource::Some(ref p) => p, - LibSource::MetadataOnly => { - return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", - name)) - } - LibSource::None => { - return Err(format!("could not find rlib for: `{}`", name)) - } - }; - f(cnum, &path); - } - Ok(()) -} - -/// Returns a boolean indicating whether the specified crate should be ignored -/// during LTO. -/// -/// Crates ignored during LTO are not lumped together in the "massive object -/// file" that we create and are linked in their normal rlib states. See -/// comments below for what crates do not participate in LTO. -/// -/// It's unusual for a crate to not participate in LTO. Typically only -/// compiler-specific and unstable crates have a reason to not participate in -/// LTO. -pub(crate) fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { - // If our target enables builtin function lowering in LLVM then the - // crates providing these functions don't participate in LTO (e.g. - // no_builtins or compiler builtins crates). - !sess.target.target.options.no_builtins && - (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum)) -} - fn link_binary_output(sess: &Session, codegen_results: &CodegenResults, crate_type: config::CrateType, @@ -352,8 +236,11 @@ fn archive_config<'a>(sess: &'a Session, /// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a /// directory being searched for `extern crate` (observing an incomplete file). /// The returned path is the temporary file containing the complete metadata. 
-fn emit_metadata<'a>(sess: &'a Session, codegen_results: &CodegenResults, tmpdir: &TempDir) - -> PathBuf { +fn emit_metadata<'a>( + sess: &'a Session, + codegen_results: &CodegenResults, + tmpdir: &TempDir +) -> PathBuf { let out_filename = tmpdir.path().join(METADATA_FILENAME); let result = fs::write(&out_filename, &codegen_results.metadata.raw_data); @@ -575,69 +462,6 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) { } } -pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { - fn infer_from( - sess: &Session, - linker: Option, - flavor: Option, - ) -> Option<(PathBuf, LinkerFlavor)> { - match (linker, flavor) { - (Some(linker), Some(flavor)) => Some((linker, flavor)), - // only the linker flavor is known; use the default linker for the selected flavor - (None, Some(flavor)) => Some((PathBuf::from(match flavor { - LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, - LinkerFlavor::Gcc => "cc", - LinkerFlavor::Ld => "ld", - LinkerFlavor::Msvc => "link.exe", - LinkerFlavor::Lld(_) => "lld", - }), flavor)), - (Some(linker), None) => { - let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { - sess.fatal("couldn't extract file stem from specified linker"); - }).to_owned(); - - let flavor = if stem == "emcc" { - LinkerFlavor::Em - } else if stem == "gcc" || stem.ends_with("-gcc") { - LinkerFlavor::Gcc - } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { - LinkerFlavor::Ld - } else if stem == "link" || stem == "lld-link" { - LinkerFlavor::Msvc - } else if stem == "lld" || stem == "rust-lld" { - LinkerFlavor::Lld(sess.target.target.options.lld_flavor) - } else { - // fall back to the value in the target spec - sess.target.target.linker_flavor - }; - - Some((linker, flavor)) - }, - (None, None) => None, - } - } - - // linker and linker flavor specified via command line have precedence over what the target - // specification specifies - if let Some(ret) = infer_from( - sess, - sess.opts.cg.linker.clone(), - sess.opts.debugging_opts.linker_flavor, - ) { - return ret; - } - - if let Some(ret) = infer_from( - sess, - sess.target.target.options.linker.clone().map(PathBuf::from), - Some(sess.target.target.linker_flavor), - ) { - return ret; - } - - bug!("Not enough information provided to determine how to invoke the linker"); -} - // Create a dynamic library or executable // // This will invoke the system linker/cc to create the resulting file. This diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index 60b06c579cb402e66066338e52d55eb5b93579d2..b5ebd0409da38f68021e1ce442e2a1261a6fa662 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -9,13 +9,14 @@ // except according to those terms. 
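Note on the lto.rs hunks below: `SerializedModule`, `LtoModuleCodegen`, `ThinModule`, and `ThinShared` are now imported from `rustc_codegen_ssa::back::lto`, and the LLVM-owned buffers plug into them through `ModuleBufferMethods`/`ThinBufferMethods`. A hedged sketch of how such a backend-generic `SerializedModule` could be shaped, modeled on the enum removed further down (the memory-mapped-file arm is omitted and the exact bounds in rustc_codegen_ssa may differ):

```rust
// Hedged sketch modeled on the SerializedModule enum removed further down; the real
// generic version in rustc_codegen_ssa::back::lto may use different bounds.
pub trait ModuleBufferMethods {
    fn data(&self) -> &[u8];
}

/// A serialized module: either held in a backend-owned buffer or loaded as raw
/// bytes from an rlib.
pub enum SerializedModule<M: ModuleBufferMethods> {
    Local(M),
    FromRlib(Vec<u8>),
}

impl<M: ModuleBufferMethods> SerializedModule<M> {
    pub fn data(&self) -> &[u8] {
        match self {
            SerializedModule::Local(m) => m.data(),
            SerializedModule::FromRlib(bytes) => bytes,
        }
    }
}

// Stand-in for the LLVM ModuleBuffer: a backend only needs to expose its serialized
// bytes through the trait for the shared LTO driver to work with it.
pub struct InMemoryBuffer(pub Vec<u8>);

impl ModuleBufferMethods for InMemoryBuffer {
    fn data(&self) -> &[u8] {
        &self.0
    }
}

fn main() {
    let local: SerializedModule<InMemoryBuffer> =
        SerializedModule::Local(InMemoryBuffer(b"BC\xC0\xDE".to_vec()));
    let from_rlib: SerializedModule<InMemoryBuffer> = SerializedModule::FromRlib(vec![1, 2, 3]);
    assert_eq!(local.data().len(), 4);
    assert_eq!(from_rlib.data(), &[1, 2, 3]);
}
```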
use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; -use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; -use back::write::{self, DiagnosticHandlers, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::symbol_export; +use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinShared, ThinModule}; +use rustc_codegen_ssa::traits::*; +use back::write::{self, DiagnosticHandlers, with_llvm_pmb, save_temp_bitcode, get_llvm_opt_level}; use errors::{FatalError, Handler}; use llvm::archive_ro::ArchiveRO; -use llvm::{True, False}; -use llvm; -use memmap; +use llvm::{self, True, False}; use rustc::dep_graph::WorkProduct; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; use rustc::hir::def_id::LOCAL_CRATE; @@ -23,9 +24,9 @@ use rustc::session::config::{self, Lto}; use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; -use rustc_codegen_utils::symbol_export; use time_graph::Timeline; -use {ModuleCodegen, ModuleLlvm, ModuleKind}; +use {ModuleLlvm, LlvmCodegenBackend}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use libc; @@ -47,71 +48,16 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } } -pub(crate) enum LtoModuleCodegen { - Fat { - module: Option, - _serialized_bitcode: Vec, - }, - - Thin(ThinModule), -} - -impl LtoModuleCodegen { - pub fn name(&self) -> &str { - match *self { - LtoModuleCodegen::Fat { .. } => "everything", - LtoModuleCodegen::Thin(ref m) => m.name(), - } - } - - /// Optimize this module within the given codegen context. - /// - /// This function is unsafe as it'll return a `ModuleCodegen` still - /// points to LLVM data structures owned by this `LtoModuleCodegen`. - /// It's intended that the module returned is immediately code generated and - /// dropped, and then this LTO module is dropped. - pub(crate) unsafe fn optimize(&mut self, - cgcx: &CodegenContext, - timeline: &mut Timeline) - -> Result - { - match *self { - LtoModuleCodegen::Fat { ref mut module, .. } => { - let module = module.take().unwrap(); - { - let config = cgcx.config(module.kind); - let llmod = module.module_llvm.llmod(); - let tm = &*module.module_llvm.tm; - run_pass_manager(cgcx, tm, llmod, config, false); - timeline.record("fat-done"); - } - Ok(module) - } - LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline), - } - } - - /// A "gauge" of how costly it is to optimize this module, used to sort - /// biggest modules first. - pub fn cost(&self) -> u64 { - match *self { - // Only one module with fat LTO, so the cost doesn't matter. - LtoModuleCodegen::Fat { .. } => 0, - LtoModuleCodegen::Thin(ref m) => m.cost(), - } - } -} - /// Performs LTO, which in the case of full LTO means merging all modules into /// a single one and returning it for further optimizing. For ThinLTO, it will /// do the global analysis necessary and return two lists, one of the modules /// the need optimization and another for modules that can simply be copied over /// from the incr. comp. cache. 
-pub(crate) fn run(cgcx: &CodegenContext, - modules: Vec, - cached_modules: Vec<(SerializedModule, WorkProduct)>, +pub(crate) fn run(cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { let diag_handler = cgcx.create_diag_handler(); let export_threshold = match cgcx.lto { @@ -230,13 +176,13 @@ pub(crate) fn run(cgcx: &CodegenContext, } } -fn fat_lto(cgcx: &CodegenContext, +fn fat_lto(cgcx: &CodegenContext, diag_handler: &Handler, - mut modules: Vec, - mut serialized_modules: Vec<(SerializedModule, CString)>, + mut modules: Vec>, + mut serialized_modules: Vec<(SerializedModule, CString)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result, FatalError> + -> Result>, FatalError> { info!("going for a fat lto"); @@ -303,7 +249,7 @@ fn fat_lto(cgcx: &CodegenContext, serialized_bitcode.push(bc_decoded); } drop(linker); - cgcx.save_temp_bitcode(&module, "lto.input"); + save_temp_bitcode(&cgcx, &module, "lto.input"); // Internalize everything that *isn't* in our whitelist to help strip out // more modules and such @@ -312,14 +258,14 @@ fn fat_lto(cgcx: &CodegenContext, llvm::LLVMRustRunRestrictionPass(llmod, ptr as *const *const libc::c_char, symbol_white_list.len() as libc::size_t); - cgcx.save_temp_bitcode(&module, "lto.after-restriction"); + save_temp_bitcode(&cgcx, &module, "lto.after-restriction"); } if cgcx.no_landing_pads { unsafe { llvm::LLVMRustMarkAllFunctionsNounwind(llmod); } - cgcx.save_temp_bitcode(&module, "lto.after-nounwind"); + save_temp_bitcode(&cgcx, &module, "lto.after-nounwind"); } timeline.record("passes"); } @@ -386,14 +332,14 @@ fn drop(&mut self) { /// calculating the *index* for ThinLTO. This index will then be shared amongst /// all of the `LtoModuleCodegen` units returned below and destroyed once /// they all go out of scope. -fn thin_lto(cgcx: &CodegenContext, +fn thin_lto(cgcx: &CodegenContext, diag_handler: &Handler, - modules: Vec, - serialized_modules: Vec<(SerializedModule, CString)>, - cached_modules: Vec<(SerializedModule, WorkProduct)>, + modules: Vec>, + serialized_modules: Vec<(SerializedModule, CString)>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { unsafe { info!("going for that thin, thin LTO"); @@ -556,9 +502,8 @@ fn thin_lto(cgcx: &CodegenContext, } } -fn run_pass_manager(cgcx: &CodegenContext, - tm: &llvm::TargetMachine, - llmod: &llvm::Module, +pub(crate) fn run_pass_manager(cgcx: &CodegenContext, + module: &ModuleCodegen, config: &ModuleConfig, thin: bool) { // Now we have one massive module inside of llmod. Time to run the @@ -569,7 +514,7 @@ fn run_pass_manager(cgcx: &CodegenContext, debug!("running the pass manager"); unsafe { let pm = llvm::LLVMCreatePassManager(); - llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod); + llvm::LLVMRustAddAnalysisPasses(module.module_llvm.tm, pm, module.module_llvm.llmod()); if config.verify_llvm_ir { let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); @@ -588,12 +533,13 @@ fn run_pass_manager(cgcx: &CodegenContext, // Note that in general this shouldn't matter too much as you typically // only turn on ThinLTO when you're compiling with optimizations // otherwise. 
- let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let opt_level = match opt_level { llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less, level => level, }; - with_llvm_pmb(llmod, config, opt_level, false, &mut |b| { + with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| { if thin { llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm); } else { @@ -615,29 +561,14 @@ fn run_pass_manager(cgcx: &CodegenContext, llvm::LLVMRustAddPass(pm, pass.unwrap()); } - time_ext(cgcx.time_passes, None, "LTO passes", || llvm::LLVMRunPassManager(pm, llmod)); + time_ext(cgcx.time_passes, None, "LTO passes", || + llvm::LLVMRunPassManager(pm, module.module_llvm.llmod())); llvm::LLVMDisposePassManager(pm); } debug!("lto done"); } -pub enum SerializedModule { - Local(ModuleBuffer), - FromRlib(Vec), - FromUncompressedFile(memmap::Mmap), -} - -impl SerializedModule { - fn data(&self) -> &[u8] { - match *self { - SerializedModule::Local(ref m) => m.data(), - SerializedModule::FromRlib(ref m) => m, - SerializedModule::FromUncompressedFile(ref m) => m, - } - } -} - pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer); unsafe impl Send for ModuleBuffer {} @@ -649,8 +580,10 @@ pub fn new(m: &llvm::Module) -> ModuleBuffer { llvm::LLVMRustModuleBufferCreate(m) }) } +} - pub fn data(&self) -> &[u8] { +impl ModuleBufferMethods for ModuleBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustModuleBufferPtr(self.0); let len = llvm::LLVMRustModuleBufferLen(self.0); @@ -665,19 +598,7 @@ fn drop(&mut self) { } } -pub struct ThinModule { - shared: Arc, - idx: usize, -} - -struct ThinShared { - data: ThinData, - thin_buffers: Vec, - serialized_modules: Vec, - module_names: Vec, -} - -struct ThinData(&'static mut llvm::ThinLTOData); +pub struct ThinData(&'static mut llvm::ThinLTOData); unsafe impl Send for ThinData {} unsafe impl Sync for ThinData {} @@ -702,8 +623,10 @@ pub fn new(m: &llvm::Module) -> ThinBuffer { ThinBuffer(buffer) } } +} - pub fn data(&self) -> &[u8] { +impl ThinBufferMethods for ThinBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; let len = llvm::LLVMRustThinLTOBufferLen(self.0); @@ -720,161 +643,142 @@ fn drop(&mut self) { } } -impl ThinModule { - fn name(&self) -> &str { - self.shared.module_names[self.idx].to_str().unwrap() - } - - fn cost(&self) -> u64 { - // Yes, that's correct, we're using the size of the bytecode as an - // indicator for how costly this codegen unit is. - self.data().len() as u64 - } - - fn data(&self) -> &[u8] { - let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); - a.unwrap_or_else(|| { - let len = self.shared.thin_buffers.len(); - self.shared.serialized_modules[self.idx - len].data() - }) - } - - unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) - -> Result - { - let diag_handler = cgcx.create_diag_handler(); - let tm = (cgcx.tm_factory)().map_err(|e| { - write::llvm_err(&diag_handler, &e) - })?; - - // Right now the implementation we've got only works over serialized - // modules, so we create a fresh new LLVM context and parse the module - // into that context. One day, however, we may do this for upstream - // crates but for locally codegened modules we may be able to reuse - // that LLVM Context and Module. 
- let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( +pub unsafe fn optimize_thin_module( + thin_module: &mut ThinModule, + cgcx: &CodegenContext, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + let tm = (cgcx.tm_factory.0)().map_err(|e| { + write::llvm_err(&diag_handler, &e) + })?; + + // Right now the implementation we've got only works over serialized + // modules, so we create a fresh new LLVM context and parse the module + // into that context. One day, however, we may do this for upstream + // crates but for locally codegened modules we may be able to reuse + // that LLVM Context and Module. + let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); + let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + llcx, + thin_module.data().as_ptr(), + thin_module.data().len(), + thin_module.shared.module_names[thin_module.idx].as_ptr(), + ).ok_or_else(|| { + let msg = "failed to parse bitcode for thin LTO module"; + write::llvm_err(&diag_handler, msg) + })? as *const _; + let module = ModuleCodegen { + module_llvm: ModuleLlvm { + llmod_raw, llcx, - self.data().as_ptr(), - self.data().len(), - self.shared.module_names[self.idx].as_ptr(), - ).ok_or_else(|| { - let msg = "failed to parse bitcode for thin LTO module"; - write::llvm_err(&diag_handler, msg) - })? as *const _; - let module = ModuleCodegen { - module_llvm: ModuleLlvm { - llmod_raw, - llcx, - tm, - }, - name: self.name().to_string(), - kind: ModuleKind::Regular, - }; - { - let llmod = module.module_llvm.llmod(); - cgcx.save_temp_bitcode(&module, "thin-lto-input"); - - // Before we do much else find the "main" `DICompileUnit` that we'll be - // using below. If we find more than one though then rustc has changed - // in a way we're not ready for, so generate an ICE by returning - // an error. - let mut cu1 = ptr::null_mut(); - let mut cu2 = ptr::null_mut(); - llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); - if !cu2.is_null() { - let msg = "multiple source DICompileUnits found"; - return Err(write::llvm_err(&diag_handler, msg)) - } + tm, + }, + name: thin_module.name().to_string(), + kind: ModuleKind::Regular, + }; + { + let llmod = module.module_llvm.llmod(); + save_temp_bitcode(&cgcx, &module, "thin-lto-input"); + + // Before we do much else find the "main" `DICompileUnit` that we'll be + // using below. If we find more than one though then rustc has changed + // in a way we're not ready for, so generate an ICE by returning + // an error. + let mut cu1 = ptr::null_mut(); + let mut cu2 = ptr::null_mut(); + llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); + if !cu2.is_null() { + let msg = "multiple source DICompileUnits found"; + return Err(write::llvm_err(&diag_handler, msg)) + } - // Like with "fat" LTO, get some better optimizations if landing pads - // are disabled by removing all landing pads. - if cgcx.no_landing_pads { - llvm::LLVMRustMarkAllFunctionsNounwind(llmod); - cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind"); - timeline.record("nounwind"); - } + // Like with "fat" LTO, get some better optimizations if landing pads + // are disabled by removing all landing pads. + if cgcx.no_landing_pads { + llvm::LLVMRustMarkAllFunctionsNounwind(llmod); + save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind"); + timeline.record("nounwind"); + } - // Up next comes the per-module local analyses that we do for Thin LTO. 
- // Each of these functions is basically copied from the LLVM - // implementation and then tailored to suit this implementation. Ideally - // each of these would be supported by upstream LLVM but that's perhaps - // a patch for another day! - // - // You can find some more comments about these functions in the LLVM - // bindings we've got (currently `PassWrapper.cpp`) - if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module"; - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-rename"); - timeline.record("rename"); - if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module"; - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve"); - timeline.record("resolve"); - if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module"; - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize"); - timeline.record("internalize"); - if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module"; - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-import"); - timeline.record("import"); - - // Ok now this is a bit unfortunate. This is also something you won't - // find upstream in LLVM's ThinLTO passes! This is a hack for now to - // work around bugs in LLVM. - // - // First discovered in #45511 it was found that as part of ThinLTO - // importing passes LLVM will import `DICompileUnit` metadata - // information across modules. This means that we'll be working with one - // LLVM module that has multiple `DICompileUnit` instances in it (a - // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of - // bugs in LLVM's backend which generates invalid DWARF in a situation - // like this: - // - // https://bugs.llvm.org/show_bug.cgi?id=35212 - // https://bugs.llvm.org/show_bug.cgi?id=35562 - // - // While the first bug there is fixed the second ended up causing #46346 - // which was basically a resurgence of #45511 after LLVM's bug 35212 was - // fixed. - // - // This function below is a huge hack around this problem. The function - // below is defined in `PassWrapper.cpp` and will basically "merge" - // all `DICompileUnit` instances in a module. Basically it'll take all - // the objects, rewrite all pointers of `DISubprogram` to point to the - // first `DICompileUnit`, and then delete all the other units. - // - // This is probably mangling to the debug info slightly (but hopefully - // not too much) but for now at least gets LLVM to emit valid DWARF (or - // so it appears). Hopefully we can remove this once upstream bugs are - // fixed in LLVM. - llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); - cgcx.save_temp_bitcode(&module, "thin-lto-after-patch"); - timeline.record("patch"); - - // Alright now that we've done everything related to the ThinLTO - // analysis it's time to run some optimizations! Here we use the same - // `run_pass_manager` as the "fat" LTO above except that we tell it to - // populate a thin-specific pass manager, which presumably LLVM treats a - // little differently. 
- info!("running thin lto passes over {}", module.name); - let config = cgcx.config(module.kind); - run_pass_manager(cgcx, module.module_llvm.tm, llmod, config, true); - cgcx.save_temp_bitcode(&module, "thin-lto-after-pm"); - timeline.record("thin-done"); + // Up next comes the per-module local analyses that we do for Thin LTO. + // Each of these functions is basically copied from the LLVM + // implementation and then tailored to suit this implementation. Ideally + // each of these would be supported by upstream LLVM but that's perhaps + // a patch for another day! + // + // You can find some more comments about these functions in the LLVM + // bindings we've got (currently `PassWrapper.cpp`) + if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) } + save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); + timeline.record("rename"); + if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); + timeline.record("resolve"); + if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); + timeline.record("internalize"); + if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module"; + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); + timeline.record("import"); - Ok(module) + // Ok now this is a bit unfortunate. This is also something you won't + // find upstream in LLVM's ThinLTO passes! This is a hack for now to + // work around bugs in LLVM. + // + // First discovered in #45511 it was found that as part of ThinLTO + // importing passes LLVM will import `DICompileUnit` metadata + // information across modules. This means that we'll be working with one + // LLVM module that has multiple `DICompileUnit` instances in it (a + // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of + // bugs in LLVM's backend which generates invalid DWARF in a situation + // like this: + // + // https://bugs.llvm.org/show_bug.cgi?id=35212 + // https://bugs.llvm.org/show_bug.cgi?id=35562 + // + // While the first bug there is fixed the second ended up causing #46346 + // which was basically a resurgence of #45511 after LLVM's bug 35212 was + // fixed. + // + // This function below is a huge hack around this problem. The function + // below is defined in `PassWrapper.cpp` and will basically "merge" + // all `DICompileUnit` instances in a module. Basically it'll take all + // the objects, rewrite all pointers of `DISubprogram` to point to the + // first `DICompileUnit`, and then delete all the other units. + // + // This is probably mangling to the debug info slightly (but hopefully + // not too much) but for now at least gets LLVM to emit valid DWARF (or + // so it appears). Hopefully we can remove this once upstream bugs are + // fixed in LLVM. 
+ llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); + save_temp_bitcode(cgcx, &module, "thin-lto-after-patch"); + timeline.record("patch"); + + // Alright now that we've done everything related to the ThinLTO + // analysis it's time to run some optimizations! Here we use the same + // `run_pass_manager` as the "fat" LTO above except that we tell it to + // populate a thin-specific pass manager, which presumably LLVM treats a + // little differently. + info!("running thin lto passes over {}", module.name); + let config = cgcx.config(module.kind); + run_pass_manager(cgcx, &module, config, true); + save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); + timeline.record("thin-done"); } + Ok(module) } #[derive(Debug, Default)] diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 8973852caa86b351d594d5ae1e730198ade33fe6..7945d381760a1b082a711014135b2948a5585ff4 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -10,58 +10,35 @@ use attributes; use back::bytecode::{self, RLIB_BYTECODE_EXTENSION}; -use back::lto::{self, ThinBuffer, SerializedModule}; -use back::link::{self, get_linker, remove}; +use back::lto::ThinBuffer; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, run_assembler}; +use rustc_codegen_ssa::traits::*; use base; use consts; -use memmap; -use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, - in_incr_comp_dir, in_incr_comp_dir_sess}; -use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; -use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; -use rustc::middle::cstore::EncodedMetadata; -use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::config::{self, OutputType, Passes, Lto}; use rustc::session::Session; -use rustc::util::nodemap::FxHashMap; -use time_graph::{self, TimeGraph, Timeline}; +use time_graph::Timeline; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; -use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm, - CachedModuleCodegen}; -use CrateInfo; -use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; -use rustc::ty::TyCtxt; -use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry}; +use ModuleLlvm; +use rustc_codegen_ssa::{ModuleCodegen, CompiledModule}; +use rustc::util::common::time_ext; use rustc_fs_util::{path2cstr, link_or_copy}; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::svh::Svh; -use rustc_codegen_utils::command::Command; -use rustc_codegen_utils::linker::LinkerInfo; -use rustc_codegen_utils::symbol_export::ExportedSymbols; -use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; -use errors::emitter::{Emitter}; -use syntax::attr; -use syntax::ext::hygiene::Mark; -use syntax_pos::MultiSpan; -use syntax_pos::symbol::Symbol; +use errors::{self, Handler, FatalError}; use type_::Type; use context::{is_pie_binary, get_reloc_model}; -use common::{C_bytes_in_context, val_ty}; -use jobserver::{Client, Acquired}; +use common; +use LlvmCodegenBackend; use rustc_demangle; -use std::any::Any; use std::ffi::{CString, CStr}; use std::fs; use std::io::{self, Write}; -use std::mem; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::str; use std::sync::Arc; -use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; -use std::time::Instant; -use std::thread; use libc::{c_uint, c_void, c_char, size_t}; pub 
const RELOC_MODEL_ARGS : [(&str, llvm::RelocMode); 7] = [ @@ -88,8 +65,6 @@ ("local-exec", llvm::ThreadLocalMode::LocalExec), ]; -const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; - pub fn llvm_err(handler: &errors::Handler, msg: &str) -> FatalError { match llvm::last_error() { Some(err) => handler.fatal(&format!("{}: {}", msg, err)), @@ -116,7 +91,7 @@ pub fn write_output_file( } } -fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { +pub(crate) fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { match optimize { config::OptLevel::No => llvm::CodeGenOptLevel::None, config::OptLevel::Less => llvm::CodeGenOptLevel::Less, @@ -126,7 +101,7 @@ fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { } } -fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { +pub(crate) fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { match optimize { config::OptLevel::Size => llvm::CodeGenOptSizeDefault, config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive, @@ -224,212 +199,31 @@ pub fn target_machine_factory(sess: &Session, find_features: bool) }) } -/// Module-specific configuration for `optimize_and_codegen`. -pub struct ModuleConfig { - /// Names of additional optimization passes to run. - passes: Vec, - /// Some(level) to optimize at a certain level, or None to run - /// absolutely no optimizations (used for the metadata module). - pub opt_level: Option, - - /// Some(level) to optimize binary size, or None to not affect program size. - opt_size: Option, - - pgo_gen: Option, - pgo_use: String, - - // Flags indicating which outputs to produce. - pub emit_pre_thin_lto_bc: bool, - emit_no_opt_bc: bool, - emit_bc: bool, - emit_bc_compressed: bool, - emit_lto_bc: bool, - emit_ir: bool, - emit_asm: bool, - emit_obj: bool, - // Miscellaneous flags. These are mostly copied from command-line - // options. - pub verify_llvm_ir: bool, - no_prepopulate_passes: bool, - no_builtins: bool, - time_passes: bool, - vectorize_loop: bool, - vectorize_slp: bool, - merge_functions: bool, - inline_threshold: Option, - // Instead of creating an object file by doing LLVM codegen, just - // make the object file bitcode. Provides easy compatibility with - // emscripten's ecc compiler, when used as the linker. 
- obj_is_bitcode: bool, - no_integrated_as: bool, - embed_bitcode: bool, - embed_bitcode_marker: bool, -} - -impl ModuleConfig { - fn new(passes: Vec) -> ModuleConfig { - ModuleConfig { - passes, - opt_level: None, - opt_size: None, - - pgo_gen: None, - pgo_use: String::new(), - - emit_no_opt_bc: false, - emit_pre_thin_lto_bc: false, - emit_bc: false, - emit_bc_compressed: false, - emit_lto_bc: false, - emit_ir: false, - emit_asm: false, - emit_obj: false, - obj_is_bitcode: false, - embed_bitcode: false, - embed_bitcode_marker: false, - no_integrated_as: false, - - verify_llvm_ir: false, - no_prepopulate_passes: false, - no_builtins: false, - time_passes: false, - vectorize_loop: false, - vectorize_slp: false, - merge_functions: false, - inline_threshold: None - } - } - - fn set_flags(&mut self, sess: &Session, no_builtins: bool) { - self.verify_llvm_ir = sess.verify_llvm_ir(); - self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; - self.no_builtins = no_builtins || sess.target.target.options.no_builtins; - self.time_passes = sess.time_passes(); - self.inline_threshold = sess.opts.cg.inline_threshold; - self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || - sess.opts.debugging_opts.cross_lang_lto.enabled(); - let embed_bitcode = sess.target.target.options.embed_bitcode || - sess.opts.debugging_opts.embed_bitcode; - if embed_bitcode { - match sess.opts.optimize { - config::OptLevel::No | - config::OptLevel::Less => { - self.embed_bitcode_marker = embed_bitcode; - } - _ => self.embed_bitcode = embed_bitcode, - } - } - - // Copy what clang does by turning on loop vectorization at O2 and - // slp vectorization at O3. Otherwise configure other optimization aspects - // of this pass manager builder. - // Turn off vectorization for emscripten, as it's not very well supported. - self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && - (sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive) && - !sess.target.target.options.is_like_emscripten; - - self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && - sess.opts.optimize == config::OptLevel::Aggressive && - !sess.target.target.options.is_like_emscripten; - - self.merge_functions = sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive; - } - - pub fn bitcode_needed(&self) -> bool { - self.emit_bc || self.obj_is_bitcode - || self.emit_bc_compressed || self.embed_bitcode - } -} - -/// Assembler name and command used by codegen when no_integrated_as is enabled -struct AssemblerCommand { - name: PathBuf, - cmd: Command, -} - -/// Additional resources used by optimize_and_codegen (not module specific) -#[derive(Clone)] -pub struct CodegenContext { - // Resources needed when running LTO - pub time_passes: bool, - pub lto: Lto, - pub no_landing_pads: bool, - pub save_temps: bool, - pub fewer_names: bool, - pub exported_symbols: Option>, - pub opts: Arc, - pub crate_types: Vec, - pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, - output_filenames: Arc, - regular_module_config: Arc, - metadata_module_config: Arc, - allocator_module_config: Arc, - pub tm_factory: Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync>, - pub msvc_imps_needed: bool, - pub target_pointer_width: String, - debuginfo: config::DebugInfo, - - // Number of cgus excluding the allocator/metadata modules - pub total_cgus: usize, - // Handler to use for diagnostics produced during codegen. 
- pub diag_emitter: SharedEmitter, - // LLVM passes added by plugins. - pub plugin_passes: Vec, - // LLVM optimizations for which we want to print remarks. - pub remark: Passes, - // Worker thread number - pub worker: usize, - // The incremental compilation session directory, or None if we are not - // compiling incrementally - pub incr_comp_session_dir: Option, - // Used to update CGU re-use information during the thinlto phase. - pub cgu_reuse_tracker: CguReuseTracker, - // Channel back to the main control thread to send messages to - coordinator_send: Sender>, - // A reference to the TimeGraph so we can register timings. None means that - // measuring is disabled. - time_graph: Option, - // The assembler command if no_integrated_as option is enabled, None otherwise - assembler_cmd: Option>, -} - -impl CodegenContext { - pub fn create_diag_handler(&self) -> Handler { - Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) - } - - pub(crate) fn config(&self, kind: ModuleKind) -> &ModuleConfig { - match kind { - ModuleKind::Regular => &self.regular_module_config, - ModuleKind::Metadata => &self.metadata_module_config, - ModuleKind::Allocator => &self.allocator_module_config, - } +pub(crate) fn save_temp_bitcode( + cgcx: &CodegenContext, + module: &ModuleCodegen, + name: &str +) { + if !cgcx.save_temps { + return } - - pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) { - if !self.save_temps { - return - } - unsafe { - let ext = format!("{}.bc", name); - let cgu = Some(&module.name[..]); - let path = self.output_filenames.temp_path_ext(&ext, cgu); - let cstr = path2cstr(&path); - let llmod = module.module_llvm.llmod(); - llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); - } + unsafe { + let ext = format!("{}.bc", name); + let cgu = Some(&module.name[..]); + let path = cgcx.output_filenames.temp_path_ext(&ext, cgu); + let cstr = path2cstr(&path); + let llmod = module.module_llvm.llmod(); + llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); } } pub struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext, &'a Handler), + data: *mut (&'a CodegenContext, &'a Handler), llcx: &'a llvm::Context, } impl<'a> DiagnosticHandlers<'a> { - pub fn new(cgcx: &'a CodegenContext, + pub fn new(cgcx: &'a CodegenContext, handler: &'a Handler, llcx: &'a llvm::Context) -> Self { let data = Box::into_raw(Box::new((cgcx, handler))); @@ -452,7 +246,7 @@ fn drop(&mut self) { } } -unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, +unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, msg: &'b str, cookie: c_uint) { cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_owned()); @@ -464,7 +258,7 @@ fn drop(&mut self) { if user.is_null() { return } - let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s)) .expect("non-UTF8 SMDiagnostic"); @@ -476,7 +270,7 @@ fn drop(&mut self) { if user.is_null() { return } - let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -513,9 +307,9 @@ fn drop(&mut self) { } // Unsafe due to LLVM calls. 
-unsafe fn optimize(cgcx: &CodegenContext, +pub(crate) unsafe fn optimize(cgcx: &CodegenContext, diag_handler: &Handler, - module: &ModuleCodegen, + module: &ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result<(), FatalError> @@ -573,7 +367,8 @@ unsafe fn optimize(cgcx: &CodegenContext, if !config.no_prepopulate_passes { llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod); llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod); - let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal || (cgcx.lto != Lto::Fat && cgcx.opts.debugging_opts.cross_lang_lto.enabled()); have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto; @@ -645,37 +440,9 @@ unsafe fn optimize(cgcx: &CodegenContext, Ok(()) } -fn generate_lto_work(cgcx: &CodegenContext, - modules: Vec, - import_only_modules: Vec<(SerializedModule, WorkProduct)>) - -> Vec<(WorkItem, u64)> -{ - let mut timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(CODEGEN_WORKER_TIMELINE, - CODEGEN_WORK_PACKAGE_KIND, - "generate lto") - }).unwrap_or(Timeline::noop()); - let (lto_modules, copy_jobs) = lto::run(cgcx, modules, import_only_modules, &mut timeline) - .unwrap_or_else(|e| e.raise()); - - let lto_modules = lto_modules.into_iter().map(|module| { - let cost = module.cost(); - (WorkItem::LTO(module), cost) - }); - - let copy_jobs = copy_jobs.into_iter().map(|wp| { - (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { - name: wp.cgu_name.clone(), - source: wp, - }), 0) - }); - - lto_modules.chain(copy_jobs).collect() -} - -unsafe fn codegen(cgcx: &CodegenContext, +pub(crate) unsafe fn codegen(cgcx: &CodegenContext, diag_handler: &Handler, - module: ModuleCodegen, + module: ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result @@ -880,14 +647,14 @@ extern "C" fn demangle_callback(input_ptr: *const c_char, /// /// Basically all of this is us attempting to follow in the footsteps of clang /// on iOS. See #35968 for lots more info. 
-unsafe fn embed_bitcode(cgcx: &CodegenContext, +unsafe fn embed_bitcode(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { - let llconst = C_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[])); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.module\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -904,10 +671,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - let llconst = C_bytes_in_context(llcx, &[]); + let llconst = common::bytes_in_context(llcx, &[]); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.cmdline\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -920,1279 +687,6 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); } -pub(crate) struct CompiledModules { - pub modules: Vec, - pub metadata_module: CompiledModule, - pub allocator_module: Option, -} - -fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { - sess.crate_types.borrow().contains(&config::CrateType::Rlib) && - sess.opts.output_types.contains_key(&OutputType::Exe) -} - -fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { - if sess.opts.incremental.is_none() { - return false - } - - match sess.lto() { - Lto::Fat | - Lto::No => false, - Lto::Thin | - Lto::ThinLocal => true, - } -} - -pub fn start_async_codegen(tcx: TyCtxt, - time_graph: Option, - metadata: EncodedMetadata, - coordinator_receive: Receiver>, - total_cgus: usize) - -> OngoingCodegen { - let sess = tcx.sess; - let crate_name = tcx.crate_name(LOCAL_CRATE); - let crate_hash = tcx.crate_hash(LOCAL_CRATE); - let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); - let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, - "windows_subsystem"); - let windows_subsystem = subsystem.map(|subsystem| { - if subsystem != "windows" && subsystem != "console" { - tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ - `windows` and `console` are allowed", - subsystem)); - } - subsystem.to_string() - }); - - let linker_info = LinkerInfo::new(tcx); - let crate_info = CrateInfo::new(tcx); - - // Figure out what we actually need to build. 
- let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(vec![]); - let mut allocator_config = ModuleConfig::new(vec![]); - - if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { - match *sanitizer { - Sanitizer::Address => { - modules_config.passes.push("asan".to_owned()); - modules_config.passes.push("asan-module".to_owned()); - } - Sanitizer::Memory => { - modules_config.passes.push("msan".to_owned()) - } - Sanitizer::Thread => { - modules_config.passes.push("tsan".to_owned()) - } - _ => {} - } - } - - if sess.opts.debugging_opts.profile { - modules_config.passes.push("insert-gcov-profiling".to_owned()) - } - - modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); - modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); - - modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize)); - modules_config.opt_size = Some(get_llvm_opt_size(sess.opts.optimize)); - - // Save all versions of the bytecode if we're saving our temporaries. - if sess.opts.cg.save_temps { - modules_config.emit_no_opt_bc = true; - modules_config.emit_pre_thin_lto_bc = true; - modules_config.emit_bc = true; - modules_config.emit_lto_bc = true; - metadata_config.emit_bc = true; - allocator_config.emit_bc = true; - } - - // Emit compressed bitcode files for the crate if we're emitting an rlib. - // Whenever an rlib is created, the bitcode is inserted into the archive in - // order to allow LTO against it. - if need_crate_bitcode_for_rlib(sess) { - modules_config.emit_bc_compressed = true; - allocator_config.emit_bc_compressed = true; - } - - modules_config.emit_pre_thin_lto_bc = - need_pre_thin_lto_bitcode_for_incr_comp(sess); - - modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || - tcx.sess.target.target.options.no_integrated_as; - - for output_type in sess.opts.output_types.keys() { - match *output_type { - OutputType::Bitcode => { modules_config.emit_bc = true; } - OutputType::LlvmAssembly => { modules_config.emit_ir = true; } - OutputType::Assembly => { - modules_config.emit_asm = true; - // If we're not using the LLVM assembler, this function - // could be invoked specially with output_type_assembly, so - // in this case we still want the metadata object file. - if !sess.opts.output_types.contains_key(&OutputType::Assembly) { - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - } - } - OutputType::Object => { modules_config.emit_obj = true; } - OutputType::Metadata => { metadata_config.emit_obj = true; } - OutputType::Exe => { - modules_config.emit_obj = true; - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - }, - OutputType::Mir => {} - OutputType::DepInfo => {} - } - } - - modules_config.set_flags(sess, no_builtins); - metadata_config.set_flags(sess, no_builtins); - allocator_config.set_flags(sess, no_builtins); - - // Exclude metadata and allocator modules from time_passes output, since - // they throw off the "LLVM passes" measurement. 
- metadata_config.time_passes = false; - allocator_config.time_passes = false; - - let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); - let (codegen_worker_send, codegen_worker_receive) = channel(); - - let coordinator_thread = start_executing_work(tcx, - &crate_info, - shared_emitter, - codegen_worker_send, - coordinator_receive, - total_cgus, - sess.jobserver.clone(), - time_graph.clone(), - Arc::new(modules_config), - Arc::new(metadata_config), - Arc::new(allocator_config)); - - OngoingCodegen { - crate_name, - crate_hash, - metadata, - windows_subsystem, - linker_info, - crate_info, - - time_graph, - coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), - codegen_worker_receive, - shared_emitter_main, - future: coordinator_thread, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - } -} - -fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( - sess: &Session, - compiled_modules: &CompiledModules, -) -> FxHashMap { - let mut work_products = FxHashMap::default(); - - if sess.opts.incremental.is_none() { - return work_products; - } - - for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { - let mut files = vec![]; - - if let Some(ref path) = module.object { - files.push((WorkProductFileKind::Object, path.clone())); - } - if let Some(ref path) = module.bytecode { - files.push((WorkProductFileKind::Bytecode, path.clone())); - } - if let Some(ref path) = module.bytecode_compressed { - files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); - } - - if let Some((id, product)) = - copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) - { - work_products.insert(id, product); - } - } - - work_products -} - -fn produce_final_output_artifacts(sess: &Session, - compiled_modules: &CompiledModules, - crate_output: &OutputFilenames) { - let mut user_wants_bitcode = false; - let mut user_wants_objects = false; - - // Produce final compile outputs. - let copy_gracefully = |from: &Path, to: &Path| { - if let Err(e) = fs::copy(from, to) { - sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); - } - }; - - let copy_if_one_unit = |output_type: OutputType, - keep_numbered: bool| { - if compiled_modules.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let module_name = Some(&compiled_modules.modules[0].name[..]); - let path = crate_output.temp_path(output_type, module_name); - copy_gracefully(&path, - &crate_output.path(output_type)); - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. - remove(sess, &path); - } - } else { - let ext = crate_output.temp_path(output_type, None) - .extension() - .unwrap() - .to_str() - .unwrap() - .to_owned(); - - if crate_output.outputs.contains_key(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring emit path because multiple .{} files \ - were produced", ext)); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring -o because multiple .{} files \ - were produced", ext)); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) 
- } - } - }; - - // Flag to indicate whether the user explicitly requested bitcode. - // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. - for output_type in crate_output.outputs.keys() { - match *output_type { - OutputType::Bitcode => { - user_wants_bitcode = true; - // Copy to .bc, but always keep the .0.bc. There is a later - // check to figure out if we should delete .0.bc files, or keep - // them for making an rlib. - copy_if_one_unit(OutputType::Bitcode, true); - } - OutputType::LlvmAssembly => { - copy_if_one_unit(OutputType::LlvmAssembly, false); - } - OutputType::Assembly => { - copy_if_one_unit(OutputType::Assembly, false); - } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | - OutputType::Metadata | - OutputType::Exe | - OutputType::DepInfo => {} - } - } - - // Clean up unwanted temporary files. - - // We create the following files by default: - // - #crate#.#module-name#.bc - // - #crate#.#module-name#.o - // - #crate#.crate.metadata.bc - // - #crate#.crate.metadata.o - // - #crate#.o (linked from crate.##.o) - // - #crate#.bc (copied from crate.##.bc) - // We may create additional files if requested by the user (through - // `-C save-temps` or `--emit=` flags). - - if !sess.opts.cg.save_temps { - // Remove the temporary .#module-name#.o objects. If the user didn't - // explicitly request bitcode (with --emit=bc), and the bitcode is not - // needed for building an rlib, then we must remove .#module-name#.bc as - // well. - - // Specific rules for keeping .#module-name#.bc: - // - If the user requested bitcode (`user_wants_bitcode`), and - // codegen_units > 1, then keep it. - // - If the user requested bitcode but codegen_units == 1, then we - // can toss .#module-name#.bc because we copied it to .bc earlier. - // - If we're not building an rlib and the user didn't request - // bitcode, then delete .#module-name#.bc. - // If you change how this works, also update back::link::link_rlib, - // where .#module-name#.bc files are (maybe) deleted after making an - // rlib. - let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); - - let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; - - let keep_numbered_objects = needs_crate_object || - (user_wants_objects && sess.codegen_units() > 1); - - for module in compiled_modules.modules.iter() { - if let Some(ref path) = module.object { - if !keep_numbered_objects { - remove(sess, path); - } - } - - if let Some(ref path) = module.bytecode { - if !keep_numbered_bitcode { - remove(sess, path); - } - } - } - - if !user_wants_bitcode { - if let Some(ref path) = compiled_modules.metadata_module.bytecode { - remove(sess, &path); - } - - if let Some(ref allocator_module) = compiled_modules.allocator_module { - if let Some(ref path) = allocator_module.bytecode { - remove(sess, path); - } - } - } - } - - // We leave the following files around by default: - // - #crate#.o - // - #crate#.crate.metadata.o - // - #crate#.bc - // These are used in linking steps and will be cleaned up afterward. -} - -pub(crate) fn dump_incremental_data(_codegen_results: &CodegenResults) { - // FIXME(mw): This does not work at the moment because the situation has - // become more complicated due to incremental LTO. Now a CGU - // can have more than two caching states. 
- // println!("[incremental] Re-using {} out of {} modules", - // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), - // codegen_results.modules.len()); -} - -enum WorkItem { - /// Optimize a newly codegened, totally unoptimized module. - Optimize(ModuleCodegen), - /// Copy the post-LTO artifacts from the incremental cache to the output - /// directory. - CopyPostLtoArtifacts(CachedModuleCodegen), - /// Perform (Thin)LTO on the given module. - LTO(lto::LtoModuleCodegen), -} - -impl WorkItem { - fn module_kind(&self) -> ModuleKind { - match *self { - WorkItem::Optimize(ref m) => m.kind, - WorkItem::CopyPostLtoArtifacts(_) | - WorkItem::LTO(_) => ModuleKind::Regular, - } - } - - fn name(&self) -> String { - match *self { - WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), - WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), - WorkItem::LTO(ref m) => format!("lto: {}", m.name()), - } - } -} - -enum WorkItemResult { - Compiled(CompiledModule), - NeedsLTO(ModuleCodegen), -} - -fn execute_work_item(cgcx: &CodegenContext, - work_item: WorkItem, - timeline: &mut Timeline) - -> Result -{ - let module_config = cgcx.config(work_item.module_kind()); - - match work_item { - WorkItem::Optimize(module) => { - execute_optimize_work_item(cgcx, module, module_config, timeline) - } - WorkItem::CopyPostLtoArtifacts(module) => { - execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) - } - WorkItem::LTO(module) => { - execute_lto_work_item(cgcx, module, module_config, timeline) - } - } -} - -fn execute_optimize_work_item(cgcx: &CodegenContext, - module: ModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - let diag_handler = cgcx.create_diag_handler(); - - unsafe { - optimize(cgcx, &diag_handler, &module, module_config, timeline)?; - } - - let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); - - // After we've done the initial round of optimizations we need to - // decide whether to synchronously codegen this module or ship it - // back to the coordinator thread for further LTO processing (which - // has to wait for all the initial modules to be optimized). - // - // Here we dispatch based on the `cgcx.lto` and kind of module we're - // codegenning... - let needs_lto = match cgcx.lto { - Lto::No => false, - - // If the linker does LTO, we don't have to do it. Note that we - // keep doing full LTO, if it is requested, as not to break the - // assumption that the output will be a single module. - Lto::Thin | Lto::ThinLocal if linker_does_lto => false, - - // Here we've got a full crate graph LTO requested. We ignore - // this, however, if the crate type is only an rlib as there's - // no full crate graph to process, that'll happen later. - // - // This use case currently comes up primarily for targets that - // require LTO so the request for LTO is always unconditionally - // passed down to the backend, but we don't actually want to do - // anything about it yet until we've got a final product. - Lto::Fat | Lto::Thin => { - cgcx.crate_types.len() != 1 || - cgcx.crate_types[0] != config::CrateType::Rlib - } - - // When we're automatically doing ThinLTO for multi-codegen-unit - // builds we don't actually want to LTO the allocator modules if - // it shows up. This is due to various linker shenanigans that - // we'll encounter later. 
- Lto::ThinLocal => { - module.kind != ModuleKind::Allocator - } - }; - - // Metadata modules never participate in LTO regardless of the lto - // settings. - let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; - - if needs_lto { - Ok(WorkItemResult::NeedsLTO(module)) - } else { - let module = unsafe { - codegen(cgcx, &diag_handler, module, module_config, timeline)? - }; - Ok(WorkItemResult::Compiled(module)) - } -} - -fn execute_copy_from_cache_work_item(cgcx: &CodegenContext, - module: CachedModuleCodegen, - module_config: &ModuleConfig, - _: &mut Timeline) - -> Result -{ - let incr_comp_session_dir = cgcx.incr_comp_session_dir - .as_ref() - .unwrap(); - let mut object = None; - let mut bytecode = None; - let mut bytecode_compressed = None; - for (kind, saved_file) in &module.source.saved_files { - let obj_out = match kind { - WorkProductFileKind::Object => { - let path = cgcx.output_filenames.temp_path(OutputType::Object, - Some(&module.name)); - object = Some(path.clone()); - path - } - WorkProductFileKind::Bytecode => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)); - bytecode = Some(path.clone()); - path - } - WorkProductFileKind::BytecodeCompressed => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)) - .with_extension(RLIB_BYTECODE_EXTENSION); - bytecode_compressed = Some(path.clone()); - path - } - }; - let source_file = in_incr_comp_dir(&incr_comp_session_dir, - &saved_file); - debug!("copying pre-existing module `{}` from {:?} to {}", - module.name, - source_file, - obj_out.display()); - if let Err(err) = link_or_copy(&source_file, &obj_out) { - let diag_handler = cgcx.create_diag_handler(); - diag_handler.err(&format!("unable to copy {} to {}: {}", - source_file.display(), - obj_out.display(), - err)); - } - } - - assert_eq!(object.is_some(), module_config.emit_obj); - assert_eq!(bytecode.is_some(), module_config.emit_bc); - assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); - - Ok(WorkItemResult::Compiled(CompiledModule { - name: module.name, - kind: ModuleKind::Regular, - object, - bytecode, - bytecode_compressed, - })) -} - -fn execute_lto_work_item(cgcx: &CodegenContext, - mut module: lto::LtoModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - let diag_handler = cgcx.create_diag_handler(); - - unsafe { - let module = module.optimize(cgcx, timeline)?; - let module = codegen(cgcx, &diag_handler, module, module_config, timeline)?; - Ok(WorkItemResult::Compiled(module)) - } -} - -enum Message { - Token(io::Result), - NeedsLTO { - result: ModuleCodegen, - worker_id: usize, - }, - Done { - result: Result, - worker_id: usize, - }, - CodegenDone { - llvm_work_item: WorkItem, - cost: u64, - }, - AddImportOnlyModule { - module_data: SerializedModule, - work_product: WorkProduct, - }, - CodegenComplete, - CodegenItem, - CodegenAborted, -} - -struct Diagnostic { - msg: String, - code: Option, - lvl: Level, -} - -#[derive(PartialEq, Clone, Copy, Debug)] -enum MainThreadWorkerState { - Idle, - Codegenning, - LLVMing, -} - -fn start_executing_work(tcx: TyCtxt, - crate_info: &CrateInfo, - shared_emitter: SharedEmitter, - codegen_worker_send: Sender, - coordinator_receive: Receiver>, - total_cgus: usize, - jobserver: Client, - time_graph: Option, - modules_config: Arc, - metadata_config: Arc, - allocator_config: Arc) - -> thread::JoinHandle> { - let coordinator_send = tcx.tx_to_llvm_workers.lock().clone(); - let sess = tcx.sess; - - 
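// Before the coordinator logic that follows: a condensed sketch of the
// post-optimization dispatch from execute_optimize_work_item above, i.e. the
// decision whether a freshly optimized module is codegened straight away or
// handed back to the coordinator for (Thin)LTO. The function name and
// parameter names here are illustrative only; the authoritative logic is the
// match in execute_optimize_work_item.
fn module_needs_lto(cgcx: &CodegenContext, module: &ModuleCodegen) -> bool {
    // Cross-language (linker-plugin) LTO hands the whole job to the linker.
    let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled();
    let needs_lto = match cgcx.lto {
        Lto::No => false,
        Lto::Thin | Lto::ThinLocal if linker_does_lto => false,
        // Full crate-graph LTO is deferred while we only produce an rlib.
        Lto::Fat | Lto::Thin => {
            cgcx.crate_types.len() != 1 ||
                cgcx.crate_types[0] != config::CrateType::Rlib
        }
        // Automatic ThinLTO never runs on the allocator shim module.
        Lto::ThinLocal => module.kind != ModuleKind::Allocator,
    };
    // Metadata modules never participate in LTO regardless of settings.
    needs_lto && module.kind != ModuleKind::Metadata
}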
// Compute the set of symbols we need to retain when doing LTO (if we need to) - let exported_symbols = { - let mut exported_symbols = FxHashMap::default(); - - let copy_symbols = |cnum| { - let symbols = tcx.exported_symbols(cnum) - .iter() - .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) - .collect(); - Arc::new(symbols) - }; - - match sess.lto() { - Lto::No => None, - Lto::ThinLocal => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - Some(Arc::new(exported_symbols)) - } - Lto::Fat | Lto::Thin => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - for &cnum in tcx.crates().iter() { - exported_symbols.insert(cnum, copy_symbols(cnum)); - } - Some(Arc::new(exported_symbols)) - } - } - }; - - // First up, convert our jobserver into a helper thread so we can use normal - // mpsc channels to manage our messages and such. - // After we've requested tokens then we'll, when we can, - // get tokens on `coordinator_receive` which will - // get managed in the main loop below. - let coordinator_send2 = coordinator_send.clone(); - let helper = jobserver.into_helper_thread(move |token| { - drop(coordinator_send2.send(Box::new(Message::Token(token)))); - }).expect("failed to spawn helper thread"); - - let mut each_linked_rlib_for_lto = Vec::new(); - drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { - if link::ignored_for_lto(sess, crate_info, cnum) { - return - } - each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); - })); - - let assembler_cmd = if modules_config.no_integrated_as { - // HACK: currently we use linker (gcc) as our assembler - let (linker, flavor) = link::linker_and_flavor(sess); - - let (name, mut cmd) = get_linker(sess, &linker, flavor); - cmd.args(&sess.target.target.options.asm_args); - - Some(Arc::new(AssemblerCommand { name, cmd })) - } else { - None - }; - - let cgcx = CodegenContext { - crate_types: sess.crate_types.borrow().clone(), - each_linked_rlib_for_lto, - lto: sess.lto(), - no_landing_pads: sess.no_landing_pads(), - fewer_names: sess.fewer_names(), - save_temps: sess.opts.cg.save_temps, - opts: Arc::new(sess.opts.clone()), - time_passes: sess.time_passes(), - exported_symbols, - plugin_passes: sess.plugin_llvm_passes.borrow().clone(), - remark: sess.opts.cg.remark.clone(), - worker: 0, - incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), - cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), - coordinator_send, - diag_emitter: shared_emitter.clone(), - time_graph, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - regular_module_config: modules_config, - metadata_module_config: metadata_config, - allocator_module_config: allocator_config, - tm_factory: target_machine_factory(tcx.sess, false), - total_cgus, - msvc_imps_needed: msvc_imps_needed(tcx), - target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), - debuginfo: tcx.sess.opts.debuginfo, - assembler_cmd, - }; - - // This is the "main loop" of parallel work happening for parallel codegen. - // It's here that we manage parallelism, schedule work, and work with - // messages coming from clients. - // - // There are a few environmental pre-conditions that shape how the system - // is set up: - // - // - Error reporting only can happen on the main thread because that's the - // only place where we have access to the compiler `Session`. - // - LLVM work can be done on any thread. - // - Codegen can only happen on the main thread. 
- // - Each thread doing substantial work most be in possession of a `Token` - // from the `Jobserver`. - // - The compiler process always holds one `Token`. Any additional `Tokens` - // have to be requested from the `Jobserver`. - // - // Error Reporting - // =============== - // The error reporting restriction is handled separately from the rest: We - // set up a `SharedEmitter` the holds an open channel to the main thread. - // When an error occurs on any thread, the shared emitter will send the - // error message to the receiver main thread (`SharedEmitterMain`). The - // main thread will periodically query this error message queue and emit - // any error messages it has received. It might even abort compilation if - // has received a fatal error. In this case we rely on all other threads - // being torn down automatically with the main thread. - // Since the main thread will often be busy doing codegen work, error - // reporting will be somewhat delayed, since the message queue can only be - // checked in between to work packages. - // - // Work Processing Infrastructure - // ============================== - // The work processing infrastructure knows three major actors: - // - // - the coordinator thread, - // - the main thread, and - // - LLVM worker threads - // - // The coordinator thread is running a message loop. It instructs the main - // thread about what work to do when, and it will spawn off LLVM worker - // threads as open LLVM WorkItems become available. - // - // The job of the main thread is to codegen CGUs into LLVM work package - // (since the main thread is the only thread that can do this). The main - // thread will block until it receives a message from the coordinator, upon - // which it will codegen one CGU, send it to the coordinator and block - // again. This way the coordinator can control what the main thread is - // doing. - // - // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is - // available, it will spawn off a new LLVM worker thread and let it process - // that a WorkItem. When a LLVM worker thread is done with its WorkItem, - // it will just shut down, which also frees all resources associated with - // the given LLVM module, and sends a message to the coordinator that the - // has been completed. - // - // Work Scheduling - // =============== - // The scheduler's goal is to minimize the time it takes to complete all - // work there is, however, we also want to keep memory consumption low - // if possible. These two goals are at odds with each other: If memory - // consumption were not an issue, we could just let the main thread produce - // LLVM WorkItems at full speed, assuring maximal utilization of - // Tokens/LLVM worker threads. However, since codegen usual is faster - // than LLVM processing, the queue of LLVM WorkItems would fill up and each - // WorkItem potentially holds on to a substantial amount of memory. - // - // So the actual goal is to always produce just enough LLVM WorkItems as - // not to starve our LLVM worker threads. That means, once we have enough - // WorkItems in our queue, we can block the main thread, so it does not - // produce more until we need them. - // - // Doing LLVM Work on the Main Thread - // ---------------------------------- - // Since the main thread owns the compiler processes implicit `Token`, it is - // wasteful to keep it blocked without doing any work. Therefore, what we do - // in this case is: We spawn off an additional LLVM worker thread that helps - // reduce the queue. 
The work it is doing corresponds to the implicit - // `Token`. The coordinator will mark the main thread as being busy with - // LLVM work. (The actual work happens on another OS thread but we just care - // about `Tokens`, not actual threads). - // - // When any LLVM worker thread finishes while the main thread is marked as - // "busy with LLVM work", we can do a little switcheroo: We give the Token - // of the just finished thread to the LLVM worker thread that is working on - // behalf of the main thread's implicit Token, thus freeing up the main - // thread again. The coordinator can then again decide what the main thread - // should do. This allows the coordinator to make decisions at more points - // in time. - // - // Striking a Balance between Throughput and Memory Consumption - // ------------------------------------------------------------ - // Since our two goals, (1) use as many Tokens as possible and (2) keep - // memory consumption as low as possible, are in conflict with each other, - // we have to find a trade off between them. Right now, the goal is to keep - // all workers busy, which means that no worker should find the queue empty - // when it is ready to start. - // How do we do achieve this? Good question :) We actually never know how - // many `Tokens` are potentially available so it's hard to say how much to - // fill up the queue before switching the main thread to LLVM work. Also we - // currently don't have a means to estimate how long a running LLVM worker - // will still be busy with it's current WorkItem. However, we know the - // maximal count of available Tokens that makes sense (=the number of CPU - // cores), so we can take a conservative guess. The heuristic we use here - // is implemented in the `queue_full_enough()` function. - // - // Some Background on Jobservers - // ----------------------------- - // It's worth also touching on the management of parallelism here. We don't - // want to just spawn a thread per work item because while that's optimal - // parallelism it may overload a system with too many threads or violate our - // configuration for the maximum amount of cpu to use for this process. To - // manage this we use the `jobserver` crate. - // - // Job servers are an artifact of GNU make and are used to manage - // parallelism between processes. A jobserver is a glorified IPC semaphore - // basically. Whenever we want to run some work we acquire the semaphore, - // and whenever we're done with that work we release the semaphore. In this - // manner we can ensure that the maximum number of parallel workers is - // capped at any one point in time. - // - // LTO and the coordinator thread - // ------------------------------ - // - // The final job the coordinator thread is responsible for is managing LTO - // and how that works. When LTO is requested what we'll to is collect all - // optimized LLVM modules into a local vector on the coordinator. Once all - // modules have been codegened and optimized we hand this to the `lto` - // module for further optimization. The `lto` module will return back a list - // of more modules to work on, which the coordinator will continue to spawn - // work for. - // - // Each LLVM module is automatically sent back to the coordinator for LTO if - // necessary. There's already optimizations in place to avoid sending work - // back to the coordinator if LTO isn't requested. 
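// The "queue full enough" heuristic referenced in the comment above is the
// small function defined near the end of start_executing_work; repeated here
// for intuition:
fn queue_full_enough(items_in_queue: usize,
                     workers_running: usize,
                     max_workers: usize) -> bool {
    // Tune me, plz.
    items_in_queue > 0 &&
        items_in_queue >= max_workers.saturating_sub(workers_running / 2)
}
// Worked example (illustrative numbers, not benchmark results): with 8 CPUs
// (max_workers = 8) and no LLVM workers running, the main thread keeps
// codegenning until 8 work items are queued; once 6 workers are running,
// 8 - 6/2 = 5 queued items already suffice for the main thread to switch
// over to doing LLVM work itself.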
- return thread::spawn(move || { - // We pretend to be within the top-level LLVM time-passes task here: - set_time_depth(1); - - let max_workers = ::num_cpus::get(); - let mut worker_id_counter = 0; - let mut free_worker_ids = Vec::new(); - let mut get_worker_id = |free_worker_ids: &mut Vec| { - if let Some(id) = free_worker_ids.pop() { - id - } else { - let id = worker_id_counter; - worker_id_counter += 1; - id - } - }; - - // This is where we collect codegen units that have gone all the way - // through codegen and LLVM. - let mut compiled_modules = vec![]; - let mut compiled_metadata_module = None; - let mut compiled_allocator_module = None; - let mut needs_lto = Vec::new(); - let mut lto_import_only_modules = Vec::new(); - let mut started_lto = false; - let mut codegen_aborted = false; - - // This flag tracks whether all items have gone through codegens - let mut codegen_done = false; - - // This is the queue of LLVM work items that still need processing. - let mut work_items = Vec::<(WorkItem, u64)>::new(); - - // This are the Jobserver Tokens we currently hold. Does not include - // the implicit Token the compiler process owns no matter what. - let mut tokens = Vec::new(); - - let mut main_thread_worker_state = MainThreadWorkerState::Idle; - let mut running = 0; - - let mut llvm_start_time = None; - - // Run the message loop while there's still anything that needs message - // processing. Note that as soon as codegen is aborted we simply want to - // wait for all existing work to finish, so many of the conditions here - // only apply if codegen hasn't been aborted as they represent pending - // work to be done. - while !codegen_done || - running > 0 || - (!codegen_aborted && ( - work_items.len() > 0 || - needs_lto.len() > 0 || - lto_import_only_modules.len() > 0 || - main_thread_worker_state != MainThreadWorkerState::Idle - )) - { - - // While there are still CGUs to be codegened, the coordinator has - // to decide how to utilize the compiler processes implicit Token: - // For codegenning more CGU or for running them through LLVM. - if !codegen_done { - if main_thread_worker_state == MainThreadWorkerState::Idle { - if !queue_full_enough(work_items.len(), running, max_workers) { - // The queue is not full enough, codegen more items: - if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { - panic!("Could not send Message::CodegenItem to main thread") - } - main_thread_worker_state = MainThreadWorkerState::Codegenning; - } else { - // The queue is full enough to not let the worker - // threads starve. Use the implicit Token to do some - // LLVM work too. - let (item, _) = work_items.pop() - .expect("queue empty - queue_full_enough() broken?"); - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } - } - } else if codegen_aborted { - // don't queue up any more work if codegen was aborted, we're - // just waiting for our existing children to finish - } else { - // If we've finished everything related to normal codegen - // then it must be the case that we've got some LTO work to do. 
- // Perform the serial work here of figuring out what we're - // going to LTO and then push a bunch of work items onto our - // queue to do LTO - if work_items.len() == 0 && - running == 0 && - main_thread_worker_state == MainThreadWorkerState::Idle { - assert!(!started_lto); - assert!(needs_lto.len() + lto_import_only_modules.len() > 0); - started_lto = true; - let modules = mem::replace(&mut needs_lto, Vec::new()); - let import_only_modules = - mem::replace(&mut lto_import_only_modules, Vec::new()); - for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { - let insertion_index = work_items - .binary_search_by_key(&cost, |&(_, cost)| cost) - .unwrap_or_else(|e| e); - work_items.insert(insertion_index, (work, cost)); - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - } - } - - // In this branch, we know that everything has been codegened, - // so it's just a matter of determining whether the implicit - // Token is free to use for LLVM work. - match main_thread_worker_state { - MainThreadWorkerState::Idle => { - if let Some((item, _)) = work_items.pop() { - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } else { - // There is no unstarted work, so let the main thread - // take over for a running worker. Otherwise the - // implicit token would just go to waste. - // We reduce the `running` counter by one. The - // `tokens.truncate()` below will take care of - // giving the Token back. - debug_assert!(running > 0); - running -= 1; - main_thread_worker_state = MainThreadWorkerState::LLVMing; - } - } - MainThreadWorkerState::Codegenning => { - bug!("codegen worker should not be codegenning after \ - codegen was already completed") - } - MainThreadWorkerState::LLVMing => { - // Already making good use of that token - } - } - } - - // Spin up what work we can, only doing this while we've got available - // parallelism slots and work left to spawn. - while !codegen_aborted && work_items.len() > 0 && running < tokens.len() { - let (item, _) = work_items.pop().unwrap(); - - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - - spawn_work(cgcx, item); - running += 1; - } - - // Relinquish accidentally acquired extra tokens - tokens.truncate(running); - - let msg = coordinator_receive.recv().unwrap(); - match *msg.downcast::().ok().unwrap() { - // Save the token locally and the next turn of the loop will use - // this to spawn a new unit of work, or it may get dropped - // immediately if we have no more work to spawn. - Message::Token(token) => { - match token { - Ok(token) => { - tokens.push(token); - - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - // If the main thread token is used for LLVM work - // at the moment, we turn that thread into a regular - // LLVM worker thread, so the main thread is free - // to react to codegen demand. 
- main_thread_worker_state = MainThreadWorkerState::Idle; - running += 1; - } - } - Err(e) => { - let msg = &format!("failed to acquire jobserver token: {}", e); - shared_emitter.fatal(msg); - // Exit the coordinator thread - panic!("{}", msg) - } - } - } - - Message::CodegenDone { llvm_work_item, cost } => { - // We keep the queue sorted by estimated processing cost, - // so that more expensive items are processed earlier. This - // is good for throughput as it gives the main thread more - // time to fill up the queue and it avoids scheduling - // expensive items to the end. - // Note, however, that this is not ideal for memory - // consumption, as LLVM module sizes are not evenly - // distributed. - let insertion_index = - work_items.binary_search_by_key(&cost, |&(_, cost)| cost); - let insertion_index = match insertion_index { - Ok(idx) | Err(idx) => idx - }; - work_items.insert(insertion_index, (llvm_work_item, cost)); - - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - assert!(!codegen_aborted); - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - Message::CodegenComplete => { - codegen_done = true; - assert!(!codegen_aborted); - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - // If codegen is aborted that means translation was aborted due - // to some normal-ish compiler error. In this situation we want - // to exit as soon as possible, but we want to make sure all - // existing work has finished. Flag codegen as being done, and - // then conditions above will ensure no more work is spawned but - // we'll keep executing this loop until `running` hits 0. - Message::CodegenAborted => { - assert!(!codegen_aborted); - codegen_done = true; - codegen_aborted = true; - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - } - - // If a thread exits successfully then we drop a token associated - // with that worker and update our `running` count. We may later - // re-acquire a token to continue running more work. We may also not - // actually drop a token here if the worker was running with an - // "ephemeral token" - // - // Note that if the thread failed that means it panicked, so we - // abort immediately. 
- Message::Done { result: Ok(compiled_module), worker_id } => { - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - - free_worker_ids.push(worker_id); - - match compiled_module.kind { - ModuleKind::Regular => { - compiled_modules.push(compiled_module); - } - ModuleKind::Metadata => { - assert!(compiled_metadata_module.is_none()); - compiled_metadata_module = Some(compiled_module); - } - ModuleKind::Allocator => { - assert!(compiled_allocator_module.is_none()); - compiled_allocator_module = Some(compiled_module); - } - } - } - Message::NeedsLTO { result, worker_id } => { - assert!(!started_lto); - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - free_worker_ids.push(worker_id); - needs_lto.push(result); - } - Message::AddImportOnlyModule { module_data, work_product } => { - assert!(!started_lto); - assert!(!codegen_done); - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - lto_import_only_modules.push((module_data, work_product)); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - Message::Done { result: Err(()), worker_id: _ } => { - bug!("worker thread panicked"); - } - Message::CodegenItem => { - bug!("the coordinator should not receive codegen requests") - } - } - } - - if let Some(llvm_start_time) = llvm_start_time { - let total_llvm_time = Instant::now().duration_since(llvm_start_time); - // This is the top-level timing for all of LLVM, set the time-depth - // to zero. - set_time_depth(0); - print_time_passes_entry(cgcx.time_passes, - "LLVM passes", - total_llvm_time); - } - - // Regardless of what order these modules completed in, report them to - // the backend in the same order every time to ensure that we're handing - // out deterministic results. - compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); - - let compiled_metadata_module = compiled_metadata_module - .expect("Metadata module not compiled?"); - - Ok(CompiledModules { - modules: compiled_modules, - metadata_module: compiled_metadata_module, - allocator_module: compiled_allocator_module, - }) - }); - - // A heuristic that determines if we have enough LLVM WorkItems in the - // queue so that the main thread can do LLVM work instead of codegen - fn queue_full_enough(items_in_queue: usize, - workers_running: usize, - max_workers: usize) -> bool { - // Tune me, plz. - items_in_queue > 0 && - items_in_queue >= max_workers.saturating_sub(workers_running / 2) - } - - fn maybe_start_llvm_timer(config: &ModuleConfig, - llvm_start_time: &mut Option) { - // We keep track of the -Ztime-passes output manually, - // since the closure-based interface does not fit well here. 
- if config.time_passes { - if llvm_start_time.is_none() { - *llvm_start_time = Some(Instant::now()); - } - } - } -} - -pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; -pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = - time_graph::TimelineId(CODEGEN_WORKER_ID); -pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); -const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); - -fn spawn_work(cgcx: CodegenContext, work: WorkItem) { - let depth = time_depth(); - - thread::spawn(move || { - set_time_depth(depth); - - // Set up a destructor which will fire off a message that we're done as - // we exit. - struct Bomb { - coordinator_send: Sender>, - result: Option, - worker_id: usize, - } - impl Drop for Bomb { - fn drop(&mut self) { - let worker_id = self.worker_id; - let msg = match self.result.take() { - Some(WorkItemResult::Compiled(m)) => { - Message::Done { result: Ok(m), worker_id } - } - Some(WorkItemResult::NeedsLTO(m)) => { - Message::NeedsLTO { result: m, worker_id } - } - None => Message::Done { result: Err(()), worker_id } - }; - drop(self.coordinator_send.send(Box::new(msg))); - } - } - - let mut bomb = Bomb { - coordinator_send: cgcx.coordinator_send.clone(), - result: None, - worker_id: cgcx.worker, - }; - - // Execute the work itself, and if it finishes successfully then flag - // ourselves as a success as well. - // - // Note that we ignore any `FatalError` coming out of `execute_work_item`, - // as a diagnostic was already sent off to the main thread - just - // surface that there was an error in this worker. - bomb.result = { - let timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(time_graph::TimelineId(cgcx.worker), - LLVM_WORK_PACKAGE_KIND, - &work.name()) - }); - let mut timeline = timeline.unwrap_or(Timeline::noop()); - execute_work_item(&cgcx, work, &mut timeline).ok() - }; - }); -} - -pub fn run_assembler(cgcx: &CodegenContext, handler: &Handler, assembly: &Path, object: &Path) { - let assembler = cgcx.assembler_cmd - .as_ref() - .expect("cgcx.assembler_cmd is missing?"); - - let pname = &assembler.name; - let mut cmd = assembler.cmd.clone(); - cmd.arg("-c").arg("-o").arg(object).arg(assembly); - debug!("{:?}", cmd); - - match cmd.output() { - Ok(prog) => { - if !prog.status.success() { - let mut note = prog.stderr.clone(); - note.extend_from_slice(&prog.stdout); - - handler.struct_err(&format!("linking with `{}` failed: {}", - pname.display(), - prog.status)) - .note(&format!("{:?}", &cmd)) - .note(str::from_utf8(¬e[..]).unwrap()) - .emit(); - handler.abort_if_errors(); - } - }, - Err(e) => { - handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e)); - handler.abort_if_errors(); - } - } -} - pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, config: &ModuleConfig, opt_level: llvm::CodeGenOptLevel, @@ -2204,7 +698,7 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, // reasonable defaults and prepare it to actually populate the pass // manager. 
let builder = llvm::LLVMPassManagerBuilderCreate(); - let opt_size = config.opt_size.unwrap_or(llvm::CodeGenOptSizeNone); + let opt_size = config.opt_size.map(get_llvm_opt_size).unwrap_or(llvm::CodeGenOptSizeNone); let inline_threshold = config.inline_threshold; let pgo_gen_path = config.pgo_gen.as_ref().map(|s| { @@ -2272,295 +766,16 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, llvm::LLVMPassManagerBuilderDispose(builder); } - -enum SharedEmitterMessage { - Diagnostic(Diagnostic), - InlineAsmError(u32, String), - AbortIfErrors, - Fatal(String), -} - -#[derive(Clone)] -pub struct SharedEmitter { - sender: Sender, -} - -pub struct SharedEmitterMain { - receiver: Receiver, -} - -impl SharedEmitter { - pub fn new() -> (SharedEmitter, SharedEmitterMain) { - let (sender, receiver) = channel(); - - (SharedEmitter { sender }, SharedEmitterMain { receiver }) - } - - fn inline_asm_error(&self, cookie: u32, msg: String) { - drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); - } - - fn fatal(&self, msg: &str) { - drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); - } -} - -impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: db.message(), - code: db.code.clone(), - lvl: db.level, - }))); - for child in &db.children { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: child.message(), - code: None, - lvl: child.level, - }))); - } - drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); - } -} - -impl SharedEmitterMain { - pub fn check(&self, sess: &Session, blocking: bool) { - loop { - let message = if blocking { - match self.receiver.recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - } else { - match self.receiver.try_recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - }; - - match message { - Ok(SharedEmitterMessage::Diagnostic(diag)) => { - let handler = sess.diagnostic(); - match diag.code { - Some(ref code) => { - handler.emit_with_code(&MultiSpan::new(), - &diag.msg, - code.clone(), - diag.lvl); - } - None => { - handler.emit(&MultiSpan::new(), - &diag.msg, - diag.lvl); - } - } - } - Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { - match Mark::from_u32(cookie).expn_info() { - Some(ei) => sess.span_err(ei.call_site, &msg), - None => sess.err(&msg), - } - } - Ok(SharedEmitterMessage::AbortIfErrors) => { - sess.abort_if_errors(); - } - Ok(SharedEmitterMessage::Fatal(msg)) => { - sess.fatal(&msg); - } - Err(_) => { - break; - } - } - - } - } -} - -pub struct OngoingCodegen { - crate_name: Symbol, - crate_hash: Svh, - metadata: EncodedMetadata, - windows_subsystem: Option, - linker_info: LinkerInfo, - crate_info: CrateInfo, - time_graph: Option, - coordinator_send: Sender>, - codegen_worker_receive: Receiver, - shared_emitter_main: SharedEmitterMain, - future: thread::JoinHandle>, - output_filenames: Arc, -} - -impl OngoingCodegen { - pub(crate) fn join( - self, - sess: &Session - ) -> (CodegenResults, FxHashMap) { - self.shared_emitter_main.check(sess, true); - let compiled_modules = match self.future.join() { - Ok(Ok(compiled_modules)) => compiled_modules, - Ok(Err(())) => { - sess.abort_if_errors(); - panic!("expected abort due to worker thread errors") - }, - Err(_) => { - bug!("panic during codegen/LLVM phase"); - } - }; - - sess.cgu_reuse_tracker.check_expected_reuse(sess); - - sess.abort_if_errors(); - - if let Some(time_graph) = self.time_graph { - 
time_graph.dump(&format!("{}-timings", self.crate_name)); - } - - let work_products = - copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, - &compiled_modules); - produce_final_output_artifacts(sess, - &compiled_modules, - &self.output_filenames); - - // FIXME: time_llvm_passes support - does this use a global context or - // something? - if sess.codegen_units() == 1 && sess.time_llvm_passes() { - unsafe { llvm::LLVMRustPrintPassTimings(); } - } - - (CodegenResults { - crate_name: self.crate_name, - crate_hash: self.crate_hash, - metadata: self.metadata, - windows_subsystem: self.windows_subsystem, - linker_info: self.linker_info, - crate_info: self.crate_info, - - modules: compiled_modules.modules, - allocator_module: compiled_modules.allocator_module, - metadata_module: compiled_modules.metadata_module, - }, work_products) - } - - pub(crate) fn submit_pre_codegened_module_to_llvm(&self, - tcx: TyCtxt, - module: ModuleCodegen) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - - // These are generally cheap and won't through off scheduling. - let cost = 0; - submit_codegened_module_to_llvm(tcx, module, cost); - } - - pub fn codegen_finished(&self, tcx: TyCtxt) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - drop(self.coordinator_send.send(Box::new(Message::CodegenComplete))); - } - - /// Consume this context indicating that codegen was entirely aborted, and - /// we need to exit as quickly as possible. - /// - /// This method blocks the current thread until all worker threads have - /// finished, and all worker threads should have exited or be real close to - /// exiting at this point. - pub fn codegen_aborted(self) { - // Signal to the coordinator it should spawn no more work and start - // shutdown. - drop(self.coordinator_send.send(Box::new(Message::CodegenAborted))); - drop(self.future.join()); - } - - pub fn check_for_errors(&self, sess: &Session) { - self.shared_emitter_main.check(sess, false); - } - - pub fn wait_for_signal_to_codegen_item(&self) { - match self.codegen_worker_receive.recv() { - Ok(Message::CodegenItem) => { - // Nothing to do - } - Ok(_) => panic!("unexpected message"), - Err(_) => { - // One of the LLVM threads must have panicked, fall through so - // error handling can be reached. 
- } - } - } -} - -// impl Drop for OngoingCodegen { -// fn drop(&mut self) { -// } -// } - -pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt, - module: ModuleCodegen, - cost: u64) { - let llvm_work_item = WorkItem::Optimize(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost, - }))); -} - -pub(crate) fn submit_post_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost: 0, - }))); -} - -pub(crate) fn submit_pre_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let filename = pre_lto_bitcode_filename(&module.name); - let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); - let file = fs::File::open(&bc_path).unwrap_or_else(|e| { - panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) - }); - - let mmap = unsafe { - memmap::Mmap::map(&file).unwrap_or_else(|e| { - panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) - }) - }; - - // Schedule the module to be loaded - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::AddImportOnlyModule { - module_data: SerializedModule::FromUncompressedFile(mmap), - work_product: module.source, - }))); -} - -pub(super) fn pre_lto_bitcode_filename(module_name: &str) -> String { - format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) -} - -fn msvc_imps_needed(tcx: TyCtxt) -> bool { - // This should never be true (because it's not supported). If it is true, - // something is wrong with commandline arg validation. - assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.opts.cg.prefer_dynamic)); - - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() -} - // Create a `__imp_ = &symbol` global for every public static `symbol`. // This is required to satisfy `dllimport` references to static data in .rlibs // when using MSVC linker. We do this only for data, as linker can fix up // code references on its own. // See #26591, #27438 -fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module) { +fn create_msvc_imps( + cgcx: &CodegenContext, + llcx: &llvm::Context, + llmod: &llvm::Module +) { if !cgcx.msvc_imps_needed { return } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 9971a41560335e940ed0e23acf61247611a798ca..78693a395b3907f03d0be552e3318d0b83f9bddd 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -24,585 +24,39 @@ //! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. 
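// The LLVM-specific free helpers further down in this file
// (bin_op_to_icmp_predicate, bin_op_to_fcmp_predicate, compare_simd_types,
// unsized_info, ...) have backend-generic counterparts in rustc_codegen_ssa,
// written against the shared builder traits instead of raw LLVM values.
// A rough sketch of that shape, for orientation only — the trait name,
// associated types and method signatures here are assumptions for
// illustration, not the exact rustc_codegen_ssa API:
fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
    t: Ty<'tcx>,
    ret_ty: Bx::Type,
    op: hir::BinOpKind,
) -> Bx::Value {
    let signed = match t.sty {
        ty::Float(_) => {
            // Float lanes compare with an fcmp; the <N x i1> result is then
            // sign-extended to the requested vector type.
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        }
        ty::Uint(_) => false,
        ty::Int(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };
    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    bx.sext(cmp, ret_ty)
}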
use super::ModuleLlvm; -use super::ModuleCodegen; -use super::ModuleKind; -use super::CachedModuleCodegen; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; +use rustc_codegen_ssa::base::maybe_create_entry_wrapper; +use super::LlvmCodegenBackend; -use abi; -use back::write::{self, OngoingCodegen}; -use llvm::{self, TypeKind, get_param}; +use llvm; use metadata; -use rustc::dep_graph::cgu_reuse_tracker::CguReuse; -use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; -use rustc::middle::lang_items::StartFnLangItem; -use rustc::middle::weak_lang_items; -use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder}; +use rustc::mir::mono::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata}; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx}; -use rustc::ty::query::Providers; -use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::ty::TyCtxt; use rustc::middle::exported_symbols; -use rustc::util::common::{time, print_time_passes_entry}; -use rustc::util::profiling::ProfileCategory; -use rustc::session::config::{self, DebugInfo, EntryFnType, Lto}; -use rustc::session::Session; -use rustc_incremental; -use allocator; -use mir::place::PlaceRef; -use attributes; -use builder::{Builder, MemFlags}; -use callee; -use common::{C_bool, C_bytes_in_context, C_usize}; -use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{C_struct_in_context, C_array, val_ty}; -use consts; +use rustc::session::config::{self, DebugInfo}; +use builder::Builder; +use common; use context::CodegenCx; -use debuginfo; -use declare; -use meth; -use mir; -use monomorphize::Instance; -use monomorphize::partitioning::{CodegenUnit, CodegenUnitExt}; -use rustc_codegen_utils::symbol_names_test; -use time_graph; -use mono_item::{MonoItem, MonoItemExt}; -use type_::Type; -use type_of::LayoutLlvmExt; -use rustc::util::nodemap::FxHashMap; -use CrateInfo; +use monomorphize::partitioning::CodegenUnitExt; +use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::sync::Lrc; -use rustc_data_structures::indexed_vec::Idx; -use std::any::Any; -use std::cmp; +use rustc_codegen_ssa::traits::*; +use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm; + use std::ffi::CString; -use std::ops::{Deref, DerefMut}; -use std::sync::mpsc; -use std::time::{Instant, Duration}; -use syntax_pos::Span; +use std::time::Instant; use syntax_pos::symbol::InternedString; -use syntax::attr; -use rustc::hir::{self, CodegenFnAttrs}; +use rustc::hir::CodegenFnAttrs; use value::Value; -use mir::operand::OperandValue; - -use rustc_codegen_utils::check_for_rustc_errors_attr; - -pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll> { - cx: &'a CodegenCx<'ll, 'tcx>, - name: Option, - istart: usize, -} - -impl StatRecorder<'a, 'll, 'tcx> { - pub fn new(cx: &'a CodegenCx<'ll, 'tcx>, name: String) -> Self { - let istart = cx.stats.borrow().n_llvm_insns; - StatRecorder { - cx, - name: Some(name), - istart, - } - } -} - -impl Drop for StatRecorder<'a, 'll, 'tcx> { - fn drop(&mut self) { - if self.cx.sess().codegen_stats() { - let mut stats = self.cx.stats.borrow_mut(); - let iend = stats.n_llvm_insns; - stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); - stats.n_fns += 1; - // Reset LLVM insn count to avoid compound costs. 
- stats.n_llvm_insns = self.istart; - } - } -} - -pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, - signed: bool) - -> llvm::IntPredicate { - match op { - hir::BinOpKind::Eq => llvm::IntEQ, - hir::BinOpKind::Ne => llvm::IntNE, - hir::BinOpKind::Lt => if signed { llvm::IntSLT } else { llvm::IntULT }, - hir::BinOpKind::Le => if signed { llvm::IntSLE } else { llvm::IntULE }, - hir::BinOpKind::Gt => if signed { llvm::IntSGT } else { llvm::IntUGT }, - hir::BinOpKind::Ge => if signed { llvm::IntSGE } else { llvm::IntUGE }, - op => { - bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ - found {:?}", - op) - } - } -} - -pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> llvm::RealPredicate { - match op { - hir::BinOpKind::Eq => llvm::RealOEQ, - hir::BinOpKind::Ne => llvm::RealUNE, - hir::BinOpKind::Lt => llvm::RealOLT, - hir::BinOpKind::Le => llvm::RealOLE, - hir::BinOpKind::Gt => llvm::RealOGT, - hir::BinOpKind::Ge => llvm::RealOGE, - op => { - bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ - found {:?}", - op); - } - } -} - -pub fn compare_simd_types( - bx: &Builder<'a, 'll, 'tcx>, - lhs: &'ll Value, - rhs: &'ll Value, - t: Ty<'tcx>, - ret_ty: &'ll Type, - op: hir::BinOpKind -) -> &'ll Value { - let signed = match t.sty { - ty::Float(_) => { - let cmp = bin_op_to_fcmp_predicate(op); - return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty); - }, - ty::Uint(_) => false, - ty::Int(_) => true, - _ => bug!("compare_simd_types: invalid SIMD type"), - }; - - let cmp = bin_op_to_icmp_predicate(op, signed); - // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension - // to get the correctly sized type. This will compile to a single instruction - // once the IR is converted to assembly if the SIMD instruction is supported - // by the target architecture. - bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty) -} - -/// Retrieve the information we are losing (making dynamic) in an unsizing -/// adjustment. -/// -/// The `old_info` argument is a bit funny. It is intended for use -/// in an upcast, where the new vtable for an object will be derived -/// from the old one. -pub fn unsized_info( - cx: &CodegenCx<'ll, 'tcx>, - source: Ty<'tcx>, - target: Ty<'tcx>, - old_info: Option<&'ll Value>, -) -> &'ll Value { - let (source, target) = cx.tcx.struct_lockstep_tails(source, target); - match (&source.sty, &target.sty) { - (&ty::Array(_, len), &ty::Slice(_)) => { - C_usize(cx, len.unwrap_usize(cx.tcx)) - } - (&ty::Dynamic(..), &ty::Dynamic(..)) => { - // For now, upcasts are limited to changes in marker - // traits, and hence never actually require an actual - // change to the vtable. - old_info.expect("unsized_info: missing old info for trait upcast") - } - (_, &ty::Dynamic(ref data, ..)) => { - let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target)) - .field(cx, abi::FAT_PTR_EXTRA); - consts::ptrcast(meth::get_vtable(cx, source, data.principal()), - vtable_ptr.llvm_type(cx)) - } - _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", - source, - target), - } -} - -/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. -pub fn unsize_thin_ptr( - bx: &Builder<'a, 'll, 'tcx>, - src: &'ll Value, - src_ty: Ty<'tcx>, - dst_ty: Ty<'tcx> -) -> (&'ll Value, &'ll Value) { - debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(_, a, _), - &ty::Ref(_, b, _)) | - (&ty::Ref(_, a, _), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::RawPtr(ty::TypeAndMut { ty: a, .. 
}), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - - let src_layout = bx.cx.layout_of(src_ty); - let dst_layout = bx.cx.layout_of(dst_ty); - let mut result = None; - for i in 0..src_layout.fields.count() { - let src_f = src_layout.field(bx.cx, i); - assert_eq!(src_layout.fields.offset(i).bytes(), 0); - assert_eq!(dst_layout.fields.offset(i).bytes(), 0); - if src_f.is_zst() { - continue; - } - assert_eq!(src_layout.size, src_f.size); - - let dst_f = dst_layout.field(bx.cx, i); - assert_ne!(src_f.ty, dst_f.ty); - assert_eq!(result, None); - result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); - } - let (lldata, llextra) = result.unwrap(); - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0, true)), - bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1, true))) - } - _ => bug!("unsize_thin_ptr: called on bad types"), - } -} - -/// Coerce `src`, which is a reference to a value of type `src_ty`, -/// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into( - bx: &Builder<'a, 'll, 'tcx>, - src: PlaceRef<'ll, 'tcx>, - dst: PlaceRef<'ll, 'tcx> -) { - let src_ty = src.layout.ty; - let dst_ty = dst.layout.ty; - let coerce_ptr = || { - let (base, info) = match src.load(bx).val { - OperandValue::Pair(base, info) => { - // fat-ptr to fat-ptr unsize preserves the vtable - // i.e. &'a fmt::Debug+Send => &'a fmt::Debug - // So we need to pointercast the base to ensure - // the types match up. - let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR); - (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info) - } - OperandValue::Immediate(base) => { - unsize_thin_ptr(bx, base, src_ty, dst_ty) - } - OperandValue::Ref(..) 
=> bug!() - }; - OperandValue::Pair(base, info).store(bx, dst); - }; - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(..), &ty::Ref(..)) | - (&ty::Ref(..), &ty::RawPtr(..)) | - (&ty::RawPtr(..), &ty::RawPtr(..)) => { - coerce_ptr() - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - coerce_ptr() - } - - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() { - let src_f = src.project_field(bx, i); - let dst_f = dst.project_field(bx, i); - - if dst_f.layout.is_zst() { - continue; - } - - if src_f.layout.ty == dst_f.layout.ty { - memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align, - src_f.layout, MemFlags::empty()); - } else { - coerce_unsized_into(bx, src_f, dst_f); - } - } - } - _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", - src_ty, - dst_ty), - } -} - -pub fn cast_shift_expr_rhs( - cx: &Builder<'_, 'll, '_>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { - cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) -} - -fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind, - lhs: &'ll Value, - rhs: &'ll Value, - trunc: F, - zext: G) - -> &'ll Value - where F: FnOnce(&'ll Value, &'ll Type) -> &'ll Value, - G: FnOnce(&'ll Value, &'ll Type) -> &'ll Value -{ - // Shifts may have any size int on the rhs - if op.is_shift() { - let mut rhs_llty = val_ty(rhs); - let mut lhs_llty = val_ty(lhs); - if rhs_llty.kind() == TypeKind::Vector { - rhs_llty = rhs_llty.element_type() - } - if lhs_llty.kind() == TypeKind::Vector { - lhs_llty = lhs_llty.element_type() - } - let rhs_sz = rhs_llty.int_width(); - let lhs_sz = lhs_llty.int_width(); - if lhs_sz < rhs_sz { - trunc(rhs, lhs_llty) - } else if lhs_sz > rhs_sz { - // FIXME (#1877: If in the future shifting by negative - // values is no longer undefined then this is wrong. - zext(rhs, lhs_llty) - } else { - rhs - } - } else { - rhs - } -} - -/// Returns whether this session's target will use SEH-based unwinding. -/// -/// This is only true for MSVC targets, and even then the 64-bit MSVC target -/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as -/// 64-bit MinGW) instead of "full SEH". -pub fn wants_msvc_seh(sess: &Session) -> bool { - sess.target.target.options.is_like_msvc -} - -pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) { - let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume"); - bx.call(assume_intrinsic, &[val], None); -} - -pub fn from_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value) -> &'ll Value { - if val_ty(val) == Type::i1(bx.cx) { - bx.zext(val, Type::i8(bx.cx)) - } else { - val - } -} - -pub fn to_immediate( - bx: &Builder<'_, 'll, '_>, - val: &'ll Value, - layout: layout::TyLayout, -) -> &'ll Value { - if let layout::Abi::Scalar(ref scalar) = layout.abi { - return to_immediate_scalar(bx, val, scalar); - } - val -} - -pub fn to_immediate_scalar( - bx: &Builder<'_, 'll, '_>, - val: &'ll Value, - scalar: &layout::Scalar, -) -> &'ll Value { - if scalar.is_bool() { - return bx.trunc(val, Type::i1(bx.cx)); - } - val -} - -pub fn call_memcpy( - bx: &Builder<'_, 'll, '_>, - dst: &'ll Value, - dst_align: Align, - src: &'ll Value, - src_align: Align, - n_bytes: &'ll Value, - flags: MemFlags, -) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
- let val = bx.load(src, src_align); - let ptr = bx.pointercast(dst, val_ty(val).ptr_to()); - bx.store_with_flags(val, ptr, dst_align, flags); - return; - } - let cx = bx.cx; - let src_ptr = bx.pointercast(src, Type::i8p(cx)); - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); - let size = bx.intcast(n_bytes, cx.isize_ty, false); - let volatile = flags.contains(MemFlags::VOLATILE); - bx.memcpy(dst_ptr, dst_align.abi(), src_ptr, src_align.abi(), size, volatile); -} - -pub fn memcpy_ty( - bx: &Builder<'_, 'll, 'tcx>, - dst: &'ll Value, - dst_align: Align, - src: &'ll Value, - src_align: Align, - layout: TyLayout<'tcx>, - flags: MemFlags, -) { - let size = layout.size.bytes(); - if size == 0 { - return; - } - - call_memcpy(bx, dst, dst_align, src, src_align, C_usize(bx.cx, size), flags); -} - -pub fn call_memset( - bx: &Builder<'_, 'll, '_>, - ptr: &'ll Value, - fill_byte: &'ll Value, - size: &'ll Value, - align: &'ll Value, - volatile: bool, -) -> &'ll Value { - let ptr_width = &bx.cx.sess().target.target.target_pointer_width; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key); - let volatile = C_bool(bx.cx, volatile); - bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) -} - -pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) { - let _s = if cx.sess().codegen_stats() { - let mut instance_name = String::new(); - DefPathBasedNames::new(cx.tcx, true, true) - .push_def_path(instance.def_id(), &mut instance_name); - Some(StatRecorder::new(cx, instance_name)) - } else { - None - }; - - // this is an info! to allow collecting monomorphization statistics - // and to allow finding the last function before LLVM aborts from - // release builds. - info!("codegen_instance({})", instance); - - let sig = instance.fn_sig(cx.tcx); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - - let lldecl = cx.instances.borrow().get(&instance).cloned().unwrap_or_else(|| - bug!("Instance `{:?}` not already declared", instance)); - - cx.stats.borrow_mut().n_closures += 1; - - let mir = cx.tcx.instance_mir(instance.def); - mir::codegen_mir(cx, lldecl, &mir, instance, sig); -} - -pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { - let sect = match attrs.link_section { - Some(name) => name, - None => return, - }; - unsafe { - let buf = SmallCStr::new(§.as_str()); - llvm::LLVMSetSection(llval, buf.as_ptr()); - } -} - -/// Create the `main` function which will initialize the rust runtime and call -/// users main function. -fn maybe_create_entry_wrapper(cx: &CodegenCx) { - let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { - Some((id, span, _)) => { - (cx.tcx.hir.local_def_id(id), span) - } - None => return, - }; - - let instance = Instance::mono(cx.tcx, main_def_id); - - if !cx.codegen_unit.contains_item(&MonoItem::Fn(instance)) { - // We want to create the wrapper in the same codegen unit as Rust's main - // function. - return; - } - - let main_llfn = callee::get_fn(cx, instance); - - let et = cx.sess().entry_fn.get().map(|e| e.2); - match et { - Some(EntryFnType::Main) => create_entry_fn(cx, span, main_llfn, main_def_id, true), - Some(EntryFnType::Start) => create_entry_fn(cx, span, main_llfn, main_def_id, false), - None => {} // Do nothing. 
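Conceptually, the entry symbol that create_entry_fn builds below behaves like the Rust sketched here, with lang_start standing in for the real start lang item and c_main for the emitted symbol; this is an illustration of the wrapper's shape, not the IR the builder actually produces.

use std::os::raw::{c_char, c_int};

fn user_main() {
    println!("hello from the user's main");
}

// Stand-in for the start lang item: it receives the user's main plus
// argc/argv and returns the process exit code.
fn lang_start(main: fn(), _argc: isize, _argv: *const *const c_char) -> isize {
    main();
    0
}

#[no_mangle]
pub extern "C" fn c_main(argc: c_int, argv: *const *const c_char) -> c_int {
    // The wrapper only forwards argc/argv and truncates the result back to
    // c_int, mirroring the final intcast in create_entry_fn.
    lang_start(user_main, argc as isize, argv) as c_int
}

fn main() {
    // Call the wrapper directly so the sketch runs on its own.
    let code = c_main(0, std::ptr::null());
    assert_eq!(code, 0);
}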
- } - - fn create_entry_fn( - cx: &CodegenCx<'ll, '_>, - sp: Span, - rust_main: &'ll Value, - rust_main_def_id: DefId, - use_start_lang_item: bool, - ) { - let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); - - let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); - // Given that `main()` has no arguments, - // then its return type cannot have - // late-bound regions, since late-bound - // regions must appear in the argument - // listing. - let main_ret_ty = cx.tcx.erase_regions( - &main_ret_ty.no_bound_vars().unwrap(), - ); - - if declare::get_defined_value(cx, "main").is_some() { - // FIXME: We should be smart and show a better diagnostic here. - cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") - .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") - .emit(); - cx.sess().abort_if_errors(); - bug!(); - } - let llfn = declare::declare_cfn(cx, "main", llfty); - - // `main` should respect same config for frame pointer elimination as rest of code - attributes::set_frame_pointer_elimination(cx, llfn); - attributes::apply_target_cpu_attr(cx, llfn); - - let bx = Builder::new_block(cx, llfn, "top"); - - debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx); - - // Params from native main() used as args for rust start function - let param_argc = get_param(llfn, 0); - let param_argv = get_param(llfn, 1); - let arg_argc = bx.intcast(param_argc, cx.isize_ty, true); - let arg_argv = param_argv; - - let (start_fn, args) = if use_start_lang_item { - let start_def_id = cx.tcx.require_lang_item(StartFnLangItem); - let start_fn = callee::resolve_and_get_fn( - cx, - start_def_id, - cx.tcx.intern_substs(&[main_ret_ty.into()]), - ); - (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()), - arg_argc, arg_argv]) - } else { - debug!("using user-defined start fn"); - (rust_main, vec![arg_argc, arg_argv]) - }; - - let result = bx.call(start_fn, &args, None); - bx.ret(bx.intcast(result, Type::c_int(cx), true)); - } -} - -fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, - llvm_module: &ModuleLlvm) - -> EncodedMetadata { +pub fn write_metadata<'a, 'gcx>( + tcx: TyCtxt<'a, 'gcx, 'gcx>, + llvm_module: &ModuleLlvm +) -> EncodedMetadata { use std::io::Write; use flate2::Compression; use flate2::write::DeflateEncoder; @@ -643,12 +97,12 @@ enum MetadataKind { DeflateEncoder::new(&mut compressed, Compression::fast()) .write_all(&metadata.raw_data).unwrap(); - let llmeta = C_bytes_in_context(metadata_llcx, &compressed); - let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false); + let llmeta = common::bytes_in_context(metadata_llcx, &compressed); + let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false); let name = exported_symbols::metadata_symbol_name(tcx); let buf = CString::new(name).unwrap(); let llglobal = unsafe { - llvm::LLVMAddGlobal(metadata_llmod, val_ty(llconst), buf.as_ptr()) + llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) }; unsafe { llvm::LLVMSetInitializer(llglobal, llconst); @@ -692,398 +146,7 @@ pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> { } } -fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - cgu: &CodegenUnit<'tcx>) - -> CguReuse { - if !tcx.dep_graph.is_fully_enabled() { - return CguReuse::No - } - - let work_product_id = &cgu.work_product_id(); - if tcx.dep_graph.previous_work_product(work_product_id).is_none() { - // We don't have anything cached for this CGU. 
This can happen - // if the CGU did not exist in the previous session. - return CguReuse::No - } - - // Try to mark the CGU as green. If we can do so, it means that nothing - // affecting the LLVM module has changed and we can re-use a cached version. - // If we compile with any kind of LTO, this means we can re-use the bitcode - // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only - // know that later). If we are not doing LTO, there is only one optimized - // version of each module, so we re-use that. - let dep_node = cgu.codegen_dep_node(tcx); - assert!(!tcx.dep_graph.dep_node_exists(&dep_node), - "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", - cgu.name()); - - if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { - // We can re-use either the pre- or the post-thinlto state - if tcx.sess.lto() != Lto::No { - CguReuse::PreLto - } else { - CguReuse::PostLto - } - } else { - CguReuse::No - } -} - -pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - rx: mpsc::Receiver>) - -> OngoingCodegen -{ - check_for_rustc_errors_attr(tcx); - - let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); - - // Codegen the metadata. - tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); - - let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("metadata")).as_str() - .to_string(); - let metadata_llvm_module = ModuleLlvm::new(tcx.sess, &metadata_cgu_name); - let metadata = time(tcx.sess, "write metadata", || { - write_metadata(tcx, &metadata_llvm_module) - }); - tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); - - let metadata_module = ModuleCodegen { - name: metadata_cgu_name, - module_llvm: metadata_llvm_module, - kind: ModuleKind::Metadata, - }; - - let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { - Some(time_graph::TimeGraph::new()) - } else { - None - }; - - // Skip crate items and just output metadata in -Z no-codegen mode. - if tcx.sess.opts.debugging_opts.no_codegen || - !tcx.sess.opts.output_types.should_codegen() { - let ongoing_codegen = write::start_async_codegen( - tcx, - time_graph, - metadata, - rx, - 1); - - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); - ongoing_codegen.codegen_finished(tcx); - - assert_and_save_dep_graph(tcx); - - ongoing_codegen.check_for_errors(tcx.sess); - - return ongoing_codegen; - } - - // Run the monomorphization collector and partition the collected items into - // codegen units. - let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; - let codegen_units = (*codegen_units).clone(); - - // Force all codegen_unit queries so they are already either red or green - // when compile_codegen_unit accesses them. We are not able to re-execute - // the codegen_unit query from just the DepNode, so an unknown color would - // lead to having to re-execute compile_codegen_unit, possibly - // unnecessarily. - if tcx.dep_graph.is_fully_enabled() { - for cgu in &codegen_units { - tcx.codegen_unit(cgu.name().clone()); - } - } - - let ongoing_codegen = write::start_async_codegen( - tcx, - time_graph.clone(), - metadata, - rx, - codegen_units.len()); - let ongoing_codegen = AbortCodegenOnDrop(Some(ongoing_codegen)); - - // Codegen an allocator shim, if necessary. - // - // If the crate doesn't have an `allocator_kind` set then there's definitely - // no shim to generate. Otherwise we also check our dependency graph for all - // our output crate types.
If anything there looks like its a `Dynamic` - // linkage, then it's already got an allocator shim and we'll be using that - // one instead. If nothing exists then it's our job to generate the - // allocator! - let any_dynamic_crate = tcx.sess.dependency_formats.borrow() - .iter() - .any(|(_, list)| { - use rustc::middle::dependency_format::Linkage; - list.iter().any(|&linkage| linkage == Linkage::Dynamic) - }); - let allocator_module = if any_dynamic_crate { - None - } else if let Some(kind) = *tcx.sess.allocator_kind.get() { - let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("allocator")).as_str() - .to_string(); - let modules = ModuleLlvm::new(tcx.sess, &llmod_id); - time(tcx.sess, "write allocator module", || { - unsafe { - allocator::codegen(tcx, &modules, kind) - } - }); - - Some(ModuleCodegen { - name: llmod_id, - module_llvm: modules, - kind: ModuleKind::Allocator, - }) - } else { - None - }; - - if let Some(allocator_module) = allocator_module { - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); - } - - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); - - // We sort the codegen units by size. This way we can schedule work for LLVM - // a bit more efficiently. - let codegen_units = { - let mut codegen_units = codegen_units; - codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); - codegen_units - }; - - let mut total_codegen_time = Duration::new(0, 0); - let mut all_stats = Stats::default(); - - for cgu in codegen_units.into_iter() { - ongoing_codegen.wait_for_signal_to_codegen_item(); - ongoing_codegen.check_for_errors(tcx.sess); - - let cgu_reuse = determine_cgu_reuse(tcx, &cgu); - tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); - - match cgu_reuse { - CguReuse::No => { - let _timing_guard = time_graph.as_ref().map(|time_graph| { - time_graph.start(write::CODEGEN_WORKER_TIMELINE, - write::CODEGEN_WORK_PACKAGE_KIND, - &format!("codegen {}", cgu.name())) - }); - let start_time = Instant::now(); - let stats = compile_codegen_unit(tcx, *cgu.name()); - all_stats.extend(stats); - total_codegen_time += start_time.elapsed(); - false - } - CguReuse::PreLto => { - write::submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - CguReuse::PostLto => { - write::submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - }; - } - - ongoing_codegen.codegen_finished(tcx); - - // Since the main thread is sometimes blocked during codegen, we keep track - // -Ztime-passes output manually. 
- print_time_passes_entry(tcx.sess.time_passes(), - "codegen to LLVM IR", - total_codegen_time); - - rustc_incremental::assert_module_sources::assert_module_sources(tcx); - - symbol_names_test::report_symbol_names(tcx); - - if tcx.sess.codegen_stats() { - println!("--- codegen stats ---"); - println!("n_glues_created: {}", all_stats.n_glues_created); - println!("n_null_glues: {}", all_stats.n_null_glues); - println!("n_real_glues: {}", all_stats.n_real_glues); - - println!("n_fns: {}", all_stats.n_fns); - println!("n_inlines: {}", all_stats.n_inlines); - println!("n_closures: {}", all_stats.n_closures); - println!("fn stats:"); - all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); - for &(ref name, insns) in all_stats.fn_stats.iter() { - println!("{} insns, {}", insns, *name); - } - } - - if tcx.sess.count_llvm_insns() { - for (k, v) in all_stats.llvm_insns.iter() { - println!("{:7} {}", *v, *k); - } - } - - ongoing_codegen.check_for_errors(tcx.sess); - - assert_and_save_dep_graph(tcx); - ongoing_codegen.into_inner() -} - -/// A curious wrapper structure whose only purpose is to call `codegen_aborted` -/// when it's dropped abnormally. -/// -/// In the process of working on rust-lang/rust#55238 a mysterious segfault was -/// stumbled upon. The segfault was never reproduced locally, but it was -/// suspected to be related to the fact that codegen worker threads were -/// sticking around by the time the main thread was exiting, causing issues. -/// -/// This structure is an attempt to fix that issue where the `codegen_aborted` -/// message will block until all workers have finished. This should ensure that -/// even if the main codegen thread panics we'll wait for pending work to -/// complete before returning from the main thread, hopefully avoiding -/// segfaults. -/// -/// If you see this comment in the code, then it means that this workaround -/// worked! We may yet one day track down the mysterious cause of that -/// segfault... 
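The type defined just below implements the workaround that comment describes, and it is an instance of a common drop-guard shape: hold the value in an Option, hand it back on the happy path, and run the abort action only if the wrapper is dropped while still holding it. A generic, stand-alone sketch of that pattern (made-up names, not the rustc type):

struct AbortOnDrop<T> {
    inner: Option<T>,
    on_abort: fn(T),
}

impl<T> AbortOnDrop<T> {
    fn new(value: T, on_abort: fn(T)) -> Self {
        AbortOnDrop { inner: Some(value), on_abort }
    }

    // Normal completion: take the value out so the Drop impl becomes a no-op.
    fn into_inner(mut self) -> T {
        self.inner.take().unwrap()
    }
}

impl<T> Drop for AbortOnDrop<T> {
    fn drop(&mut self) {
        // Only reached on an early return, a panic, or another abnormal exit.
        if let Some(value) = self.inner.take() {
            (self.on_abort)(value);
        }
    }
}

fn main() {
    let guard = AbortOnDrop::new("session", |s| println!("aborting {}", s));
    // Happy path: defuse the guard, so the abort callback never runs.
    let session = guard.into_inner();
    println!("finished {}", session);
}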
-struct AbortCodegenOnDrop(Option); - -impl AbortCodegenOnDrop { - fn into_inner(mut self) -> OngoingCodegen { - self.0.take().unwrap() - } -} - -impl Deref for AbortCodegenOnDrop { - type Target = OngoingCodegen; - - fn deref(&self) -> &OngoingCodegen { - self.0.as_ref().unwrap() - } -} - -impl DerefMut for AbortCodegenOnDrop { - fn deref_mut(&mut self) -> &mut OngoingCodegen { - self.0.as_mut().unwrap() - } -} - -impl Drop for AbortCodegenOnDrop { - fn drop(&mut self) { - if let Some(codegen) = self.0.take() { - codegen.codegen_aborted(); - } - } -} - -fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - time(tcx.sess, - "assert dep graph", - || rustc_incremental::assert_dep_graph(tcx)); - - time(tcx.sess, - "serialize dep graph", - || rustc_incremental::save_dep_graph(tcx)); -} - -impl CrateInfo { - pub fn new(tcx: TyCtxt) -> CrateInfo { - let mut info = CrateInfo { - panic_runtime: None, - compiler_builtins: None, - profiler_runtime: None, - sanitizer_runtime: None, - is_no_builtins: Default::default(), - native_libraries: Default::default(), - used_libraries: tcx.native_libraries(LOCAL_CRATE), - link_args: tcx.link_args(LOCAL_CRATE), - crate_name: Default::default(), - used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), - used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), - used_crate_source: Default::default(), - wasm_imports: Default::default(), - lang_item_to_crate: Default::default(), - missing_lang_items: Default::default(), - }; - let lang_items = tcx.lang_items(); - - let load_wasm_items = tcx.sess.crate_types.borrow() - .iter() - .any(|c| *c != config::CrateType::Rlib) && - tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; - - if load_wasm_items { - info.load_wasm_imports(tcx, LOCAL_CRATE); - } - - let crates = tcx.crates(); - - let n_crates = crates.len(); - info.native_libraries.reserve(n_crates); - info.crate_name.reserve(n_crates); - info.used_crate_source.reserve(n_crates); - info.missing_lang_items.reserve(n_crates); - - for &cnum in crates.iter() { - info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); - info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); - info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); - if tcx.is_panic_runtime(cnum) { - info.panic_runtime = Some(cnum); - } - if tcx.is_compiler_builtins(cnum) { - info.compiler_builtins = Some(cnum); - } - if tcx.is_profiler_runtime(cnum) { - info.profiler_runtime = Some(cnum); - } - if tcx.is_sanitizer_runtime(cnum) { - info.sanitizer_runtime = Some(cnum); - } - if tcx.is_no_builtins(cnum) { - info.is_no_builtins.insert(cnum); - } - if load_wasm_items { - info.load_wasm_imports(tcx, cnum); - } - let missing = tcx.missing_lang_items(cnum); - for &item in missing.iter() { - if let Ok(id) = lang_items.require(item) { - info.lang_item_to_crate.insert(item, id.krate); - } - } - - // No need to look for lang items that are whitelisted and don't - // actually need to exist. 
- let missing = missing.iter() - .cloned() - .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) - .collect(); - info.missing_lang_items.insert(cnum, missing); - } - - return info - } - - fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { - self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| { - let instance = Instance::mono(tcx, id); - let import_name = tcx.symbol_name(instance); - - (import_name.to_string(), module.clone()) - })); - } -} - -fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> Stats { let start_time = Instant::now(); @@ -1100,69 +163,54 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64; - write::submit_codegened_module_to_llvm(tcx, - module, - cost); + submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost); return stats; - fn module_codegen<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, + fn module_codegen<'ll, 'tcx>( + tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) - -> (Stats, ModuleCodegen) + -> (Stats, ModuleCodegen) { + let backend = LlvmCodegenBackend(()); let cgu = tcx.codegen_unit(cgu_name); - // Instantiate monomorphizations without filling out definitions yet... - let llvm_module = ModuleLlvm::new(tcx.sess, &cgu_name.as_str()); + let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str()); let stats = { let cx = CodegenCx::new(tcx, cgu, &llvm_module); let mono_items = cx.codegen_unit .items_in_deterministic_order(cx.tcx); for &(mono_item, (linkage, visibility)) in &mono_items { - mono_item.predefine(&cx, linkage, visibility); + mono_item.predefine::(&cx, linkage, visibility); } // ... and now that we have everything pre-defined, fill out those definitions. 
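That comment marks the second half of a deliberately two-phase structure: predefine every item first so that any body can refer to any symbol, then fill in the definitions. In miniature, with stand-in Item and Module types rather than the real MonoItem/CodegenCx machinery, the shape is:

use std::collections::HashMap;

struct Item {
    name: &'static str,
    calls: Vec<&'static str>,
}

#[derive(Default)]
struct Module {
    declared: HashMap<&'static str, usize>, // name -> symbol id
    defined: Vec<String>,
}

fn main() {
    let items = vec![
        Item { name: "foo", calls: vec!["bar"] }, // forward reference to bar
        Item { name: "bar", calls: vec![] },
    ];

    let mut module = Module::default();

    // Pass 1: predefine. Every name gets a symbol before any body is built.
    for (id, item) in items.iter().enumerate() {
        module.declared.insert(item.name, id);
    }

    // Pass 2: define. Bodies can now resolve calls to items declared later.
    for item in &items {
        let callees: Vec<usize> = item.calls.iter().map(|c| module.declared[c]).collect();
        module.defined.push(format!("{} -> {:?}", item.name, callees));
    }

    for line in &module.defined {
        println!("{}", line);
    }
}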
for &(mono_item, _) in &mono_items { - mono_item.define(&cx); + mono_item.define::(&cx); } // If this codegen unit contains the main function, also create the // wrapper here - maybe_create_entry_wrapper(&cx); + maybe_create_entry_wrapper::(&cx); // Run replace-all-uses-with for statics that need it - for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { + for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, val_ty(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); - llvm::LLVMDeleteGlobal(old_g); + cx.static_replace_all_uses(old_g, new_g) } } // Create the llvm.used variable // This variable has type [N x i8*] and is stored in the llvm.metadata section - if !cx.used_statics.borrow().is_empty() { - let name = const_cstr!("llvm.used"); - let section = const_cstr!("llvm.metadata"); - let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); - - unsafe { - let g = llvm::LLVMAddGlobal(cx.llmod, - val_ty(array), - name.as_ptr()); - llvm::LLVMSetInitializer(g, array); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); - } + if !cx.used_statics().borrow().is_empty() { + cx.create_used_variable() } // Finalize debuginfo if cx.sess().opts.debuginfo != DebugInfo::None { - debuginfo::finalize(&cx); + cx.debuginfo_finalize(); } - cx.stats.into_inner() + cx.consume_stats().into_inner() }; (stats, ModuleCodegen { @@ -1173,35 +221,15 @@ fn module_codegen<'a, 'tcx>( } } -pub fn provide_both(providers: &mut Providers) { - providers.dllimport_foreign_items = |tcx, krate| { - let module_map = tcx.foreign_modules(krate); - let module_map = module_map.iter() - .map(|lib| (lib.def_id, lib)) - .collect::>(); - - let dllimports = tcx.native_libraries(krate) - .iter() - .filter(|lib| { - if lib.kind != cstore::NativeLibraryKind::NativeUnknown { - return false - } - let cfg = match lib.cfg { - Some(ref cfg) => cfg, - None => return true, - }; - attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) - }) - .filter_map(|lib| lib.foreign_module) - .map(|id| &module_map[&id]) - .flat_map(|module| module.foreign_items.iter().cloned()) - .collect(); - Lrc::new(dllimports) - }; - - providers.is_dllimport_foreign_item = |tcx, def_id| { - tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) +pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { + let sect = match attrs.link_section { + Some(name) => name, + None => return, }; + unsafe { + let buf = SmallCStr::new(§.as_str()); + llvm::LLVMSetSection(llval, buf.as_ptr()); + } } pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { @@ -1227,25 +255,3 @@ pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { Visibility::Protected => llvm::Visibility::Protected, } } - -// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement -// the HashStable trait. Normally DepGraph::with_task() calls are -// hidden behind queries, but CGU creation is a special case in two -// ways: (1) it's not a query and (2) CGU are output nodes, so their -// Fingerprints are not actually needed. It remains to be clarified -// how exactly this case will be handled in the red/green system but -// for now we content ourselves with providing a no-op HashStable -// implementation for CGUs. 
-mod temp_stable_hash_impls { - use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, - HashStable}; - use ModuleCodegen; - - impl HashStable for ModuleCodegen { - fn hash_stable(&self, - _: &mut HCX, - _: &mut StableHasher) { - // do nothing - } - } -} diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 9db4015013e28a7fa810ebf41ea6ebb776142ef8..34e4f4d7e1835b7e24bae6014d708b7968b00213 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -9,18 +9,26 @@ // except according to those terms. use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{IntPredicate, RealPredicate, False, OperandBundleDef}; -use llvm::{self, BasicBlock}; -use common::*; +use llvm::{self, False, BasicBlock}; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; +use rustc_codegen_ssa::{self, MemFlags}; +use common::Funclet; +use context::CodegenCx; use type_::Type; +use type_of::LayoutLlvmExt; use value::Value; use libc::{c_uint, c_char}; -use rustc::ty::TyCtxt; -use rustc::ty::layout::{Align, Size}; -use rustc::session::{config, Session}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, Size, TyLayout}; +use rustc::session::config; use rustc_data_structures::small_c_str::SmallCStr; - +use rustc_codegen_ssa::traits::*; +use syntax; +use rustc_codegen_ssa::base::to_immediate; +use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef}; +use rustc_codegen_ssa::mir::place::PlaceRef; use std::borrow::Cow; +use std::ffi::CStr; use std::ops::Range; use std::ptr; @@ -46,17 +54,49 @@ fn noname() -> *const c_char { &CNULL } -bitflags! { - pub struct MemFlags: u8 { - const VOLATILE = 1 << 0; - const NONTEMPORAL = 1 << 1; - const UNALIGNED = 1 << 2; +impl BackendTypes for Builder<'_, 'll, 'tcx> { + type Value = as BackendTypes>::Value; + type BasicBlock = as BackendTypes>::BasicBlock; + type Type = as BackendTypes>::Type; + type Context = as BackendTypes>::Context; + type Funclet = as BackendTypes>::Funclet; + + type DIScope = as BackendTypes>::DIScope; +} + +impl ty::layout::HasDataLayout for Builder<'_, '_, '_> { + fn data_layout(&self) -> &ty::layout::TargetDataLayout { + self.cx.data_layout() + } +} + +impl ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.cx.tcx + } +} + +impl ty::layout::LayoutOf for Builder<'_, '_, 'tcx> { + type Ty = Ty<'tcx>; + type TyLayout = TyLayout<'tcx>; + + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { + self.cx.layout_of(ty) } } -impl Builder<'a, 'll, 'tcx> { - pub fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self { - let bx = Builder::with_cx(cx); + +impl HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> { + type CodegenCx = CodegenCx<'ll, 'tcx>; +} + +impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { + fn new_block<'b>( + cx: &'a CodegenCx<'ll, 'tcx>, + llfn: &'ll Value, + name: &'b str + ) -> Self { + let mut bx = Builder::with_cx(cx); let llbb = unsafe { let name = SmallCStr::new(name); llvm::LLVMAppendBasicBlockInContext( @@ -69,7 +109,7 @@ pub fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b s bx } - pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { // Create a fresh builder from the crate context. 
let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) @@ -80,85 +120,77 @@ pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { } } - pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx> { + fn build_sibling_block<'b>(&self, name: &'b str) -> Self { Builder::new_block(self.cx, self.llfn(), name) } - pub fn sess(&self) -> &Session { - self.cx.sess() - } - - pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { - self.cx.tcx - } - - pub fn llfn(&self) -> &'ll Value { + fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } - pub fn llbb(&self) -> &'ll BasicBlock { + fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } } fn count_insn(&self, category: &str) { - if self.cx.sess().codegen_stats() { - self.cx.stats.borrow_mut().n_llvm_insns += 1; + if self.cx().sess().codegen_stats() { + self.cx().stats.borrow_mut().n_llvm_insns += 1; } - if self.cx.sess().count_llvm_insns() { - *self.cx.stats - .borrow_mut() - .llvm_insns - .entry(category.to_string()) - .or_insert(0) += 1; + if self.cx().sess().count_llvm_insns() { + *self.cx().stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; } } - pub fn set_value_name(&self, value: &'ll Value, name: &str) { + fn set_value_name(&mut self, value: &'ll Value, name: &str) { let cname = SmallCStr::new(name); unsafe { llvm::LLVMSetValueName(value, cname.as_ptr()); } } - pub fn position_at_end(&self, llbb: &'ll BasicBlock) { + fn position_at_end(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - pub fn position_at_start(&self, llbb: &'ll BasicBlock) { + fn position_at_start(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } } - pub fn ret_void(&self) { + fn ret_void(&mut self) { self.count_insn("retvoid"); unsafe { llvm::LLVMBuildRetVoid(self.llbuilder); } } - pub fn ret(&self, v: &'ll Value) { + fn ret(&mut self, v: &'ll Value) { self.count_insn("ret"); unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } - pub fn br(&self, dest: &'ll BasicBlock) { + fn br(&mut self, dest: &'ll BasicBlock) { self.count_insn("br"); unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); } } - pub fn cond_br( - &self, + fn cond_br( + &mut self, cond: &'ll Value, then_llbb: &'ll BasicBlock, else_llbb: &'ll BasicBlock, @@ -169,8 +201,8 @@ pub fn cond_br( } } - pub fn switch( - &self, + fn switch( + &mut self, v: &'ll Value, else_llbb: &'ll BasicBlock, num_cases: usize, @@ -180,12 +212,14 @@ pub fn switch( } } - pub fn invoke(&self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + fn invoke( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -193,7 +227,8 @@ pub fn invoke(&self, args); let args = self.check_call("invoke", llfn, args); - let bundle = bundle.map(|b| &*b.raw); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); unsafe { llvm::LLVMRustBuildInvoke(self.llbuilder, @@ -207,7 +242,7 @@ pub fn invoke(&self, } } - pub fn unreachable(&self) { + fn unreachable(&mut self) { self.count_insn("unreachable"); unsafe { llvm::LLVMBuildUnreachable(self.llbuilder); @@ -215,21 +250,21 @@ pub fn unreachable(&self) { } /* 
Arithmetic */ - pub fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("add"); unsafe { llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); @@ -238,21 +273,21 @@ pub fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { } } - pub fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sub"); unsafe { llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); @@ -261,21 +296,21 @@ pub fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { } } - pub fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("mul"); unsafe { llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - pub fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - pub fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); @@ -285,42 +320,42 @@ pub fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { } - pub fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("udiv"); unsafe { llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactudiv"); unsafe { llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sdiv"); unsafe { llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactsdiv"); unsafe { llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv(&mut 
self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); @@ -329,28 +364,28 @@ pub fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { } } - pub fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("urem"); unsafe { llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } - pub fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("srem"); unsafe { llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); @@ -359,78 +394,78 @@ pub fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { } } - pub fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("shl"); unsafe { llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - pub fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("lshr"); unsafe { llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("ashr"); unsafe { llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("and"); unsafe { llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - pub fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("or"); unsafe { llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - pub fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("xor"); unsafe { llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - pub fn neg(&self, v: &'ll Value) -> &'ll Value { + fn neg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("neg"); unsafe { llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - pub fn fneg(&self, v: &'ll Value) -> &'ll Value { + fn fneg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("fneg"); unsafe { llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - pub fn not(&self, v: &'ll Value) -> &'ll Value { + fn not(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("not"); unsafe { llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - pub fn alloca(&self, 
ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - let bx = Builder::with_cx(self.cx); + fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); bx.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -445,7 +480,7 @@ pub fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Va } } - pub fn array_alloca(&self, + fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, name: &str, @@ -464,7 +499,7 @@ pub fn array_alloca(&self, } } - pub fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { + fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -473,7 +508,7 @@ pub fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { } } - pub fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { + fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { self.count_insn("load.volatile"); unsafe { let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -482,19 +517,100 @@ pub fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { } } - pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, size: Size) -> &'ll Value { + fn atomic_load( + &mut self, + ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + size: Size, + ) -> &'ll Value { self.count_insn("load.atomic"); unsafe { - let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); + let load = llvm::LLVMRustBuildAtomicLoad( + self.llbuilder, + ptr, + noname(), + AtomicOrdering::from_generic(order), + ); // LLVM requires the alignment of atomic loads to be at least the size of the type. llvm::LLVMSetAlignment(load, size.bytes() as c_uint); load } } + fn load_operand( + &mut self, + place: PlaceRef<'tcx, &'ll Value> + ) -> OperandRef<'tcx, &'ll Value> { + debug!("PlaceRef::load: {:?}", place); + + assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + + if place.layout.is_zst() { + return OperandRef::new_zst(self.cx(), place.layout); + } + + fn scalar_load_metadata<'a, 'll, 'tcx>( + bx: &mut Builder<'a, 'll, 'tcx>, + load: &'ll Value, + scalar: &layout::Scalar + ) { + let vr = scalar.valid_range.clone(); + match scalar.value { + layout::Int(..) 
=> { + let range = scalar.valid_range_exclusive(bx.cx()); + if range.start != range.end { + bx.range_metadata(load, range); + } + } + layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + bx.nonnull_metadata(load); + } + _ => {} + } + } - pub fn range_metadata(&self, load: &'ll Value, range: Range) { - if self.sess().target.target.arch == "amdgpu" { + let val = if let Some(llextra) = place.llextra { + OperandValue::Ref(place.llval, Some(llextra), place.align) + } else if place.layout.is_llvm_immediate() { + let mut const_llval = None; + unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + } + let llval = const_llval.unwrap_or_else(|| { + let load = self.load(place.llval, place.align); + if let layout::Abi::Scalar(ref scalar) = place.layout.abi { + scalar_load_metadata(self, load, scalar); + } + load + }); + OperandValue::Immediate(to_immediate(self, llval, place.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + let mut load = |i, scalar: &layout::Scalar| { + let llptr = self.struct_gep(place.llval, i as u64); + let load = self.load(llptr, place.align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { + self.trunc(load, self.cx().type_i1()) + } else { + load + } + }; + OperandValue::Pair(load(0, a), load(1, b)) + } else { + OperandValue::Ref(place.llval, None, place.align) + }; + + OperandRef { val, layout: place.layout } + } + + + + fn range_metadata(&mut self, load: &'ll Value, range: Range) { + if self.cx().sess().target.target.arch == "amdgpu" { // amdgpu/LLVM does something weird and thinks a i64 value is // split into a v2i32, halving the bitwidth LLVM expects, // tripping an assertion. So, for now, just disable this @@ -503,10 +619,10 @@ pub fn range_metadata(&self, load: &'ll Value, range: Range) { } unsafe { - let llty = val_ty(load); + let llty = self.cx.val_ty(load); let v = [ - C_uint_big(llty, range.start), - C_uint_big(llty, range.end) + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end) ]; llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, @@ -516,19 +632,19 @@ pub fn range_metadata(&self, load: &'ll Value, range: Range) { } } - pub fn nonnull_metadata(&self, load: &'ll Value) { + fn nonnull_metadata(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - pub fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - pub fn store_with_flags( - &self, + fn store_with_flags( + &mut self, val: &'ll Value, ptr: &'ll Value, align: Align, @@ -553,7 +669,7 @@ pub fn store_with_flags( // *always* point to a metadata value of the integer 1. 
// // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = C_i32(self.cx, 1); + let one = self.cx.const_i32(1); let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); } @@ -561,19 +677,24 @@ pub fn store_with_flags( } } - pub fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: AtomicOrdering, size: Size) { + fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, size: Size) { debug!("Store {:?} -> {:?}", val, ptr); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); unsafe { - let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); + let store = llvm::LLVMRustBuildAtomicStore( + self.llbuilder, + val, + ptr, + AtomicOrdering::from_generic(order), + ); // LLVM requires the alignment of atomic stores to be at least the size of the type. llvm::LLVMSetAlignment(store, size.bytes() as c_uint); } } - pub fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("gep"); unsafe { llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), @@ -581,7 +702,7 @@ pub fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { } } - pub fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("inboundsgep"); unsafe { llvm::LLVMBuildInBoundsGEP( @@ -589,122 +710,109 @@ pub fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Valu } } - pub fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { - self.count_insn("structgep"); - assert_eq!(idx as c_uint as u64, idx); - unsafe { - llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) - } - } - /* Casts */ - pub fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("zext"); - unsafe { - llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) - } - } - - pub fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - pub fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { 
self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptrunc"); unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - pub fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - pub fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) } } - pub fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("pointercast"); + + fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { + self.count_insn("intcast"); unsafe { - llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - pub fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { - self.count_insn("intcast"); + fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("pointercast"); unsafe { - llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) } } /* Comparisons */ - pub fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); + let op = llvm::IntPredicate::from_generic(op); unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } - pub fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -712,14 +820,14 @@ pub fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll } /* Miscellaneous instructions */ - pub fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { + fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - pub fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { assert_eq!(vals.len(), bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -731,10 +839,10 @@ pub fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) - } } - 
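The hunks above move the LLVM builder's inherent `pub fn` cast, comparison and phi helpers onto a backend-agnostic builder trait and change the receivers from `&self` to `&mut self`. A minimal, self-contained sketch of that shape, using hypothetical names rather than the actual rustc traits:

    // Hypothetical stand-in for the backend-agnostic builder trait: methods
    // take `&mut self`, so bookkeeping such as instruction counting can go
    // through a plain mutable borrow.
    trait BuilderMethods {
        type Value;
        fn count_insn(&mut self, category: &'static str);
        fn trunc(&mut self, val: Self::Value, bits: u32) -> Self::Value;
    }

    struct CountingBuilder {
        insns: usize,
    }

    impl BuilderMethods for CountingBuilder {
        type Value = u64;

        fn count_insn(&mut self, _category: &'static str) {
            self.insns += 1;
        }

        fn trunc(&mut self, val: u64, bits: u32) -> u64 {
            // Mirror the pattern above: count the instruction, then build it.
            self.count_insn("trunc");
            val & ((1u64 << bits) - 1)
        }
    }

    fn main() {
        let mut bx = CountingBuilder { insns: 0 };
        assert_eq!(bx.trunc(0x1234, 8), 0x34);
        assert_eq!(bx.insns, 1);
    }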
pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, - inputs: &[&'ll Value], output: &'ll Type, - volatile: bool, alignstack: bool, - dia: AsmDialect) -> Option<&'ll Value> { + fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr, + inputs: &[&'ll Value], output: &'ll Type, + volatile: bool, alignstack: bool, + dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { self.count_insn("inlineasm"); let volatile = if volatile { llvm::True } @@ -744,18 +852,24 @@ pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, let argtys = inputs.iter().map(|v| { debug!("Asm Input Type: {:?}", *v); - val_ty(*v) + self.cx.val_ty(*v) }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = Type::func(&argtys[..], output); + let fty = self.cx().type_func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. - let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); + let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); debug!("Constraint verification result: {:?}", constraints_ok); if constraints_ok { let v = llvm::LLVMRustInlineAsm( - fty, asm, cons, volatile, alignstack, dia); + fty, + asm.as_ptr(), + cons.as_ptr(), + volatile, + alignstack, + AsmDialect::from_generic(dia), + ); Some(self.call(v, inputs, None)) } else { // LLVM has detected an issue with our constraints, bail out @@ -764,49 +878,71 @@ pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, } } - pub fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { - self.count_insn("call"); - - debug!("Call {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("call", llfn, args); - let bundle = bundle.map(|b| &*b.raw); - + fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. + let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.cx().type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.cx().type_i8p()); + let src = self.pointercast(src, self.cx().type_i8p()); unsafe { - llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), - args.len() as c_uint, bundle, noname()) + llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint, + src, src_align.abi() as c_uint, size, is_volatile); } } - pub fn memcpy(&self, dst: &'ll Value, dst_align: u64, - src: &'ll Value, src_align: u64, - size: &'ll Value, is_volatile: bool) -> &'ll Value { + fn memmove(&mut self, dst: &'ll Value, dst_align: Align, + src: &'ll Value, src_align: Align, + size: &'ll Value, flags: MemFlags) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memmove. 
+ let val = self.load(src, src_align); + let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val))); + self.store_with_flags(val, ptr, dst_align, flags); + return; + } + let size = self.intcast(size, self.cx().type_isize(), false); + let is_volatile = flags.contains(MemFlags::VOLATILE); + let dst = self.pointercast(dst, self.cx().type_i8p()); + let src = self.pointercast(src, self.cx().type_i8p()); unsafe { - llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align as c_uint, - src, src_align as c_uint, size, is_volatile) + llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint, + src, src_align.abi() as c_uint, size, is_volatile); } } - pub fn memmove(&self, dst: &'ll Value, dst_align: u64, - src: &'ll Value, src_align: u64, - size: &'ll Value, is_volatile: bool) -> &'ll Value { - unsafe { - llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align as c_uint, - src, src_align as c_uint, size, is_volatile) - } + fn memset( + &mut self, + ptr: &'ll Value, + fill_byte: &'ll Value, + size: &'ll Value, + align: Align, + flags: MemFlags, + ) { + let ptr_width = &self.cx().sess().target.target.target_pointer_width; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key); + let ptr = self.pointercast(ptr, self.cx().type_i8p()); + let align = self.cx().const_u32(align.abi() as u32); + let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE)); + self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } - pub fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("minnum"); unsafe { let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") } } - pub fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("maxnum"); unsafe { let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); @@ -814,8 +950,8 @@ pub fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { } } - pub fn select( - &self, cond: &'ll Value, + fn select( + &mut self, cond: &'ll Value, then_val: &'ll Value, else_val: &'ll Value, ) -> &'ll Value { @@ -826,22 +962,22 @@ pub fn select( } #[allow(dead_code)] - pub fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - pub fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) } } - pub fn insert_element( - &self, vec: &'ll Value, + fn insert_element( + &mut self, vec: &'ll Value, elt: &'ll Value, idx: &'ll Value, ) -> &'ll Value { @@ -851,24 +987,24 @@ pub fn insert_element( } } - pub fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { self.count_insn("shufflevector"); unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - pub fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + fn 
vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { - let elt_ty = val_ty(elt); - let undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, C_i32(self.cx, 0)); - let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64); - self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) + let elt_ty = self.cx.val_ty(elt); + let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64)); + let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); + let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64); + self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty)) } } - pub fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fadd_fast"); unsafe { // FIXME: add a non-fast math version once @@ -879,7 +1015,7 @@ pub fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll instr } } - pub fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmul_fast"); unsafe { // FIXME: add a non-fast math version once @@ -890,35 +1026,35 @@ pub fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll instr } } - pub fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.add"); unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } } - pub fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.mul"); unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } } - pub fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.and"); unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } } - pub fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.or"); unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } } - pub fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.xor"); unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } } - pub fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin"); unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } } - pub fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax"); unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } } - pub fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); @@ -926,7 +1062,7 @@ pub fn 
vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { instr } } - pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); @@ -934,16 +1070,16 @@ pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { instr } } - pub fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.min"); unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } } - pub fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.max"); unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } } - pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -951,7 +1087,7 @@ pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { } } - pub fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, + fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("insertvalue"); assert_eq!(idx as c_uint as u64, idx); @@ -961,7 +1097,7 @@ pub fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, } } - pub fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, + fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value, num_clauses: usize) -> &'ll Value { self.count_insn("landingpad"); unsafe { @@ -970,29 +1106,29 @@ pub fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, } } - pub fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { + fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { llvm::LLVMAddClause(landing_pad, clause); } } - pub fn set_cleanup(&self, landing_pad: &'ll Value) { + fn set_cleanup(&mut self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } } - pub fn resume(&self, exn: &'ll Value) -> &'ll Value { + fn resume(&mut self, exn: &'ll Value) -> &'ll Value { self.count_insn("resume"); unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } } - pub fn cleanup_pad(&self, + fn cleanup_pad(&mut self, parent: Option<&'ll Value>, - args: &[&'ll Value]) -> &'ll Value { + args: &[&'ll Value]) -> Funclet<'ll> { self.count_insn("cleanuppad"); let name = const_cstr!("cleanuppad"); let ret = unsafe { @@ -1002,23 +1138,23 @@ pub fn cleanup_pad(&self, args.as_ptr(), name.as_ptr()) }; - ret.expect("LLVM does not have support for cleanuppad") + Funclet::new(ret.expect("LLVM does not have support for cleanuppad")) } - pub fn cleanup_ret( - &self, cleanup: &'ll Value, + fn cleanup_ret( + &mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>, ) -> &'ll Value { self.count_insn("cleanupret"); let ret = unsafe { - llvm::LLVMRustBuildCleanupRet(self.llbuilder, cleanup, unwind) + llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) }; ret.expect("LLVM does not have support for cleanupret") } - pub fn catch_pad(&self, + fn catch_pad(&mut self, parent: &'ll Value, - args: &[&'ll 
Value]) -> &'ll Value { + args: &[&'ll Value]) -> Funclet<'ll> { self.count_insn("catchpad"); let name = const_cstr!("catchpad"); let ret = unsafe { @@ -1026,19 +1162,19 @@ pub fn catch_pad(&self, args.len() as c_uint, args.as_ptr(), name.as_ptr()) }; - ret.expect("LLVM does not have support for catchpad") + Funclet::new(ret.expect("LLVM does not have support for catchpad")) } - pub fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { + fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { self.count_insn("catchret"); let ret = unsafe { - llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) + llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) }; ret.expect("LLVM does not have support for catchret") } - pub fn catch_switch( - &self, + fn catch_switch( + &mut self, parent: Option<&'ll Value>, unwind: Option<&'ll BasicBlock>, num_handlers: usize, @@ -1053,80 +1189,101 @@ pub fn catch_switch( ret.expect("LLVM does not have support for catchswitch") } - pub fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { unsafe { llvm::LLVMRustAddHandler(catch_switch, handler); } } - pub fn set_personality_fn(&self, personality: &'ll Value) { + fn set_personality_fn(&mut self, personality: &'ll Value) { unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); } } // Atomic Operations - pub fn atomic_cmpxchg( - &self, + fn atomic_cmpxchg( + &mut self, dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, - order: AtomicOrdering, - failure_order: AtomicOrdering, - weak: llvm::Bool, + order: rustc_codegen_ssa::common::AtomicOrdering, + failure_order: rustc_codegen_ssa::common::AtomicOrdering, + weak: bool, ) -> &'ll Value { - unsafe { - llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, - order, failure_order, weak) + let weak = if weak { llvm::True } else { llvm::False }; + unsafe { + llvm::LLVMRustBuildAtomicCmpXchg( + self.llbuilder, + dst, + cmp, + src, + AtomicOrdering::from_generic(order), + AtomicOrdering::from_generic(failure_order), + weak + ) } } - pub fn atomic_rmw( - &self, - op: AtomicRmwBinOp, + fn atomic_rmw( + &mut self, + op: rustc_codegen_ssa::common::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, - order: AtomicOrdering, + order: rustc_codegen_ssa::common::AtomicOrdering, ) -> &'ll Value { unsafe { - llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False) + llvm::LLVMBuildAtomicRMW( + self.llbuilder, + AtomicRmwBinOp::from_generic(op), + dst, + src, + AtomicOrdering::from_generic(order), + False) } } - pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { + fn atomic_fence( + &mut self, + order: rustc_codegen_ssa::common::AtomicOrdering, + scope: rustc_codegen_ssa::common::SynchronizationScope + ) { unsafe { - llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope); + llvm::LLVMRustBuildAtomicFence( + self.llbuilder, + AtomicOrdering::from_generic(order), + SynchronizationScope::from_generic(scope) + ); } } - pub fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) } } - pub fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { self.count_insn("addincoming"); 
unsafe { llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } - pub fn set_invariant_load(&self, load: &'ll Value) { + fn set_invariant_load(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - /// Returns the ptr value that should be used for storing `val`. - fn check_store<'b>(&self, + fn check_store<'b>(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { - let dest_ptr_ty = val_ty(ptr); - let stored_ty = val_ty(val); - let stored_ptr_ty = stored_ty.ptr_to(); + let dest_ptr_ty = self.cx.val_ty(ptr); + let stored_ty = self.cx.val_ty(val); + let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); + assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); if dest_ptr_ty == stored_ptr_ty { ptr @@ -1138,24 +1295,23 @@ fn check_store<'b>(&self, } } - /// Returns the args that should be used for a call to `llfn`. - fn check_call<'b>(&self, + fn check_call<'b>(&mut self, typ: &str, llfn: &'ll Value, args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { - let mut fn_ty = val_ty(llfn); + let mut fn_ty = self.cx.val_ty(llfn); // Strip off pointers - while fn_ty.kind() == llvm::TypeKind::Pointer { - fn_ty = fn_ty.element_type(); + while self.cx.type_kind(fn_ty) == TypeKind::Pointer { + fn_ty = self.cx.element_type(fn_ty); } - assert!(fn_ty.kind() == llvm::TypeKind::Function, + assert!(self.cx.type_kind(fn_ty) == TypeKind::Function, "builder::{} not passed a function, but {:?}", typ, fn_ty); - let param_tys = fn_ty.func_params(); + let param_tys = self.cx.func_params_types(fn_ty); let all_args_match = param_tys.iter() - .zip(args.iter().map(|&v| val_ty(v))) + .zip(args.iter().map(|&v| self.cx().val_ty(v))) .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1166,7 +1322,7 @@ fn check_call<'b>(&self, .zip(args.iter()) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = val_ty(actual_val); + let actual_ty = self.cx().val_ty(actual_val); if expected_ty != actual_ty { debug!("Type mismatch in function call of {:?}. \ Expected {:?} for param {}, got {:?}; injecting bitcast", @@ -1181,23 +1337,15 @@ fn check_call<'b>(&self, Cow::Owned(casted_args) } - pub fn lifetime_start(&self, ptr: &'ll Value, size: Size) { + fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } - pub fn lifetime_end(&self, ptr: &'ll Value, size: Size) { + fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } - /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations - /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` - /// and the intrinsic for `lt` and passes them to `emit`, which is in - /// charge of generating code to call the passed intrinsic on whatever - /// block of generated code is targeted for the intrinsic. - /// - /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations - /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
- fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { + fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; } @@ -1209,7 +1357,61 @@ fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); - let ptr = self.pointercast(ptr, Type::i8p(self.cx)); - self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); + let ptr = self.pointercast(ptr, self.cx.type_i8p()); + self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); + } + + fn call( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { + self.count_insn("call"); + + debug!("Call {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("call", llfn, args); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); + + unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + bundle, noname() + ) + } + } + + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) + } + } + + fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } + + fn cx(&self) -> &CodegenCx<'ll, 'tcx> { + self.cx + } + + unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { + llvm::LLVMDeleteBasicBlock(bb); + } + + fn do_not_inline(&mut self, llret: &'ll Value) { + llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); } } diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index c8c693257d52f410261bd223c7d2872dc50b475a..e79880e8de06e2f4a7226b3fb04e75a683fd9dc7 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -15,18 +15,14 @@ //! closure. use attributes; -use common::{self, CodegenCx}; -use consts; -use declare; use llvm; use monomorphize::Instance; -use type_of::LayoutLlvmExt; +use context::CodegenCx; use value::Value; +use rustc_codegen_ssa::traits::*; -use rustc::hir::def_id::DefId; -use rustc::ty::{self, TypeFoldable}; -use rustc::ty::layout::LayoutOf; -use rustc::ty::subst::Substs; +use rustc::ty::TypeFoldable; +use rustc::ty::layout::{LayoutOf, HasTyCtxt}; /// Codegens a reference to a fn/method item, monomorphizing and /// inlining as it goes. @@ -39,7 +35,7 @@ pub fn get_fn( cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>, ) -> &'ll Value { - let tcx = cx.tcx; + let tcx = cx.tcx(); debug!("get_fn(instance={:?})", instance); @@ -47,8 +43,8 @@ pub fn get_fn( assert!(!instance.substs.has_escaping_bound_vars()); assert!(!instance.substs.has_param_types()); - let sig = instance.fn_sig(cx.tcx); - if let Some(&llfn) = cx.instances.borrow().get(&instance) { + let sig = instance.fn_sig(cx.tcx()); + if let Some(&llfn) = cx.instances().borrow().get(&instance) { return llfn; } @@ -57,9 +53,9 @@ pub fn get_fn( // Create a fn pointer with the substituted signature. 
let fn_ptr_ty = tcx.mk_fn_ptr(sig); - let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx); + let llptrty = cx.backend_type(cx.layout_of(fn_ptr_ty)); - let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) { + let llfn = if let Some(llfn) = cx.get_declared_value(&sym) { // This is subtle and surprising, but sometimes we have to bitcast // the resulting fn pointer. The reason has to do with external // functions. If you have two crates that both bind the same C @@ -83,16 +79,16 @@ pub fn get_fn( // This can occur on either a crate-local or crate-external // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - if common::val_ty(llfn) != llptrty { + if cx.val_ty(llfn) != llptrty { debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); - consts::ptrcast(llfn, llptrty) + cx.static_ptrcast(llfn, llptrty) } else { debug!("get_fn: not casting pointer!"); llfn } } else { - let llfn = declare::declare_fn(cx, &sym, sig); - assert_eq!(common::val_ty(llfn), llptrty); + let llfn = cx.declare_fn(&sym, sig); + assert_eq!(cx.val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); if instance.def.is_inline(tcx) { @@ -204,35 +200,3 @@ pub fn get_fn( llfn } - -pub fn resolve_and_get_fn( - cx: &CodegenCx<'ll, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>, -) -> &'ll Value { - get_fn( - cx, - ty::Instance::resolve( - cx.tcx, - ty::ParamEnv::reveal_all(), - def_id, - substs - ).unwrap() - ) -} - -pub fn resolve_and_get_fn_for_vtable( - cx: &CodegenCx<'ll, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>, -) -> &'ll Value { - get_fn( - cx, - ty::Instance::resolve_for_vtable( - cx.tcx, - ty::ParamEnv::reveal_all(), - def_id, - substs - ).unwrap() - ) -} diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index c9b464fd8f3dd48482fa3a307a3f55b0c0bc7856..2fc505d42db52c120510550d4c9e2737cccebc4e 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -12,42 +12,26 @@ //! Code that is useful in various codegen modules. -use llvm::{self, TypeKind}; -use llvm::{True, False, Bool, OperandBundleDef}; -use rustc::hir::def_id::DefId; -use rustc::middle::lang_items::LangItem; +use llvm::{self, True, False, Bool, BasicBlock, OperandBundleDef}; use abi; -use base; -use builder::Builder; use consts; -use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; +use rustc_codegen_ssa::traits::*; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{HasDataLayout, LayoutOf}; -use rustc::hir; +use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size}; +use rustc::mir::interpret::{Scalar, AllocType, Allocation}; +use consts::const_alloc_to_llvm; +use rustc_codegen_ssa::mir::place::PlaceRef; use libc::{c_uint, c_char}; use syntax::symbol::LocalInternedString; -use syntax_pos::{Span, DUMMY_SP}; +use syntax::ast::Mutability; pub use context::CodegenCx; -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) -} - -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) -} - -pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) -} - /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". 
* @@ -110,295 +94,307 @@ pub fn bundle(&self) -> &OperandBundleDef<'ll> { } } -pub fn val_ty(v: &'ll Value) -> &'ll Type { - unsafe { - llvm::LLVMTypeOf(v) - } -} +impl BackendTypes for CodegenCx<'ll, 'tcx> { + type Value = &'ll Value; + type BasicBlock = &'ll BasicBlock; + type Type = &'ll Type; + type Context = &'ll llvm::Context; + type Funclet = Funclet<'ll>; -// LLVM constant constructors. -pub fn C_null(t: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstNull(t) - } + type DIScope = &'ll llvm::debuginfo::DIScope; } -pub fn C_undef(t: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMGetUndef(t) +impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn const_null(&self, t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstNull(t) + } } -} -pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value { - unsafe { - llvm::LLVMConstInt(t, i as u64, True) + fn const_undef(&self, t: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMGetUndef(t) + } } -} -pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value { - unsafe { - llvm::LLVMConstInt(t, i, False) + fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i as u64, True) + } } -} -pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value { - unsafe { - let words = [u as u64, (u >> 64) as u64]; - llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) + fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value { + unsafe { + llvm::LLVMConstInt(t, i, False) + } } -} - -pub fn C_bool(cx: &CodegenCx<'ll, '_>, val: bool) -> &'ll Value { - C_uint(Type::i1(cx), val as u64) -} - -pub fn C_i32(cx: &CodegenCx<'ll, '_>, i: i32) -> &'ll Value { - C_int(Type::i32(cx), i as i64) -} -pub fn C_u32(cx: &CodegenCx<'ll, '_>, i: u32) -> &'ll Value { - C_uint(Type::i32(cx), i as u64) -} - -pub fn C_u64(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { - C_uint(Type::i64(cx), i) -} + fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value { + unsafe { + let words = [u as u64, (u >> 64) as u64]; + llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) + } + } -pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value { - let bit_size = cx.data_layout().pointer_size.bits(); - if bit_size < 64 { - // make sure it doesn't overflow - assert!(i < (1< &'ll Value { + self.const_uint(self.type_i1(), val as u64) } - C_uint(cx.isize_ty, i) -} + fn const_i32(&self, i: i32) -> &'ll Value { + self.const_int(self.type_i32(), i as i64) + } -pub fn C_u8(cx: &CodegenCx<'ll, '_>, i: u8) -> &'ll Value { - C_uint(Type::i8(cx), i as u64) -} + fn const_u32(&self, i: u32) -> &'ll Value { + self.const_uint(self.type_i32(), i as u64) + } + fn const_u64(&self, i: u64) -> &'ll Value { + self.const_uint(self.type_i64(), i) + } -// This is a 'c-like' raw string, which differs from -// our boxed-and-length-annotated strings. 
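The const_uint_big method just above hands a u128 to LLVM as two u64 words (low word first), and the hi_lo_to_u128 helper kept further down in this file reassembles such a pair. A tiny, LLVM-free check of that round trip (a sketch, not part of the patch):

    // Split a u128 into the (low, high) u64 word pair used by const_uint_big,
    // then rebuild it the way hi_lo_to_u128 does.
    fn to_words(u: u128) -> [u64; 2] {
        [u as u64, (u >> 64) as u64]
    }

    fn from_words(lo: u64, hi: u64) -> u128 {
        ((hi as u128) << 64) | (lo as u128)
    }

    fn main() {
        let x: u128 = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210;
        let [lo, hi] = to_words(x);
        assert_eq!(from_words(lo, hi), x);
    }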
-pub fn C_cstr( - cx: &CodegenCx<'ll, '_>, - s: LocalInternedString, - null_terminated: bool, -) -> &'ll Value { - unsafe { - if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) { - return llval; + fn const_usize(&self, i: u64) -> &'ll Value { + let bit_size = self.data_layout().pointer_size.bits(); + if bit_size < 64 { + // make sure it doesn't overflow + assert!(i < (1<, s: LocalInternedString) -> &'ll Value { - let len = s.len(); - let cs = consts::ptrcast(C_cstr(cx, s, false), - cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to()); - C_fat_ptr(cx, cs, C_usize(cx, len as u64)) -} + fn const_u8(&self, i: u8) -> &'ll Value { + self.const_uint(self.type_i8(), i as u64) + } -pub fn C_fat_ptr(cx: &CodegenCx<'ll, '_>, ptr: &'ll Value, meta: &'ll Value) -> &'ll Value { - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(cx, &[ptr, meta], false) -} + fn const_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> &'ll Value { + unsafe { + if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { + return llval; + } -pub fn C_struct(cx: &CodegenCx<'ll, '_>, elts: &[&'ll Value], packed: bool) -> &'ll Value { - C_struct_in_context(cx.llcx, elts, packed) -} + let sc = llvm::LLVMConstStringInContext(self.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = self.generate_local_symbol_name("str"); + let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + self.const_cstr_cache.borrow_mut().insert(s, g); + g + } + } -pub fn C_struct_in_context( - llcx: &'ll llvm::Context, - elts: &[&'ll Value], - packed: bool, -) -> &'ll Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as Bool) + fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(self.const_cstr(s, false), + self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); + self.const_fat_ptr(cs, self.const_usize(len as u64)) } -} -pub fn C_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + fn const_fat_ptr( + &self, + ptr: &'ll Value, + meta: &'ll Value + ) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + self.const_struct(&[ptr, meta], false) } -} -pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + fn const_struct( + &self, + elts: &[&'ll Value], + packed: bool + ) -> &'ll Value { + struct_in_context(self.llcx, elts, packed) } -} -pub fn C_bytes(cx: &CodegenCx<'ll, '_>, bytes: &[u8]) -> &'ll Value { - C_bytes_in_context(cx.llcx, bytes) -} + fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + } + } -pub fn C_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } } -} -pub fn const_get_elt(v: &'ll 
Value, idx: u64) -> &'ll Value {
-    unsafe {
-        assert_eq!(idx as c_uint as u64, idx);
-        let us = &[idx as c_uint];
-        let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
-
-        debug!("const_get_elt(v={:?}, idx={}, r={:?})",
-               v, idx, r);
-
-        r
-    }
-}
+    fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
+        bytes_in_context(self.llcx, bytes)
+    }
+
+    fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
+        unsafe {
+            assert_eq!(idx as c_uint as u64, idx);
+            let us = &[idx as c_uint];
+            let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
+
+            debug!("const_get_elt(v={:?}, idx={}, r={:?})",
+                   v, idx, r);
+
+            r
+        }
+    }

-pub fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> {
-    unsafe {
-        if is_const_real(v) {
-            let mut loses_info: llvm::Bool = ::std::mem::uninitialized();
-            let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info);
-            let loses_info = if loses_info == 1 { true } else { false };
-            Some((r, loses_info))
-        } else {
-            None
-        }
-    }
-}
+    fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> {
+        unsafe {
+            if self.is_const_real(v) {
+                let mut loses_info: llvm::Bool = ::std::mem::uninitialized();
+                let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info);
+                let loses_info = if loses_info == 1 { true } else { false };
+                Some((r, loses_info))
+            } else {
+                None
+            }
+        }
+    }

-pub fn const_to_uint(v: &'ll Value) -> u64 {
-    unsafe {
-        llvm::LLVMConstIntGetZExtValue(v)
-    }
-}
+    fn const_to_uint(&self, v: &'ll Value) -> u64 {
+        unsafe {
+            llvm::LLVMConstIntGetZExtValue(v)
+        }
+    }

-pub fn is_const_integral(v: &'ll Value) -> bool {
-    unsafe {
-        llvm::LLVMIsAConstantInt(v).is_some()
-    }
-}
+    fn is_const_integral(&self, v: &'ll Value) -> bool {
+        unsafe {
+            llvm::LLVMIsAConstantInt(v).is_some()
+        }
+    }

-pub fn is_const_real(v: &'ll Value) -> bool {
-    unsafe {
-        llvm::LLVMIsAConstantFP(v).is_some()
-    }
-}
+    fn is_const_real(&self, v: &'ll Value) -> bool {
+        unsafe {
+            llvm::LLVMIsAConstantFP(v).is_some()
+        }
+    }

-#[inline]
-fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
-    ((hi as u128) << 64) | (lo as u128)
-}
-
-pub fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option<u128> {
-    unsafe {
-        if is_const_integral(v) {
-            let (mut lo, mut hi) = (0u64, 0u64);
-            let success = llvm::LLVMRustConstInt128Get(v, sign_ext,
-                                                       &mut hi, &mut lo);
-            if success {
-                Some(hi_lo_to_u128(lo, hi))
-            } else {
-                None
-            }
-        } else {
-            None
-        }
-    }
-}
+    fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
+        unsafe {
+            if self.is_const_integral(v) {
+                let (mut lo, mut hi) = (0u64, 0u64);
+                let success = llvm::LLVMRustConstInt128Get(v, sign_ext,
+                                                           &mut hi, &mut lo);
+                if success {
+                    Some(hi_lo_to_u128(lo, hi))
+                } else {
+                    None
+                }
+            } else {
+                None
+            }
+        }
+    }

-pub fn langcall(tcx: TyCtxt,
-                span: Option<Span>,
-                msg: &str,
-                li: LangItem)
-                -> DefId {
-    tcx.lang_items().require(li).unwrap_or_else(|s| {
-        let msg = format!("{} {}", msg, s);
-        match span {
-            Some(span) => tcx.sess.span_fatal(span, &msg[..]),
-            None => tcx.sess.fatal(&msg[..]),
+    fn scalar_to_backend(
+        &self,
+        cv: Scalar,
+        layout: &layout::Scalar,
+        llty: &'ll Type,
+    ) -> &'ll Value {
+        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+        match cv {
+            Scalar::Bits { size: 0, ..
} => { + assert_eq!(0, layout.value.size(self).bytes()); + self.const_undef(self.type_ix(0)) + }, + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.value.size(self).bytes()); + let llval = self.const_uint_big(self.type_ix(bitsize), bits); + if layout.value == layout::Pointer { + unsafe { llvm::LLVMConstIntToPtr(llval, llty) } + } else { + self.static_bitcast(llval, llty) + } + }, + Scalar::Ptr(ptr) => { + let alloc_type = self.tcx.alloc_map.lock().get(ptr.alloc_id); + let base_addr = match alloc_type { + Some(AllocType::Memory(alloc)) => { + let init = const_alloc_to_llvm(self, alloc); + if alloc.mutability == Mutability::Mutable { + self.static_addr_of_mut(init, alloc.align, None) + } else { + self.static_addr_of(init, alloc.align, None) + } + } + Some(AllocType::Function(fn_instance)) => { + self.get_fn(fn_instance) + } + Some(AllocType::Static(def_id)) => { + assert!(self.tcx.is_static(def_id).is_some()); + self.get_static(def_id) + } + None => bug!("missing allocation {:?}", ptr.alloc_id), + }; + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.static_bitcast(base_addr, self.type_i8p()), + &self.const_usize(ptr.offset.bytes()), + 1, + ) }; + if layout.value != layout::Pointer { + unsafe { llvm::LLVMConstPtrToInt(llval, llty) } + } else { + self.static_bitcast(llval, llty) + } + } } - }) -} + } -// To avoid UB from LLVM, these two functions mask RHS with an -// appropriate mask unconditionally (i.e. the fallback behavior for -// all shifts). For 32- and 64-bit types, this matches the semantics -// of Java. (See related discussion on #1877 and #10183.) - -pub fn build_unchecked_lshift( - bx: &Builder<'a, 'll, 'tcx>, - lhs: &'ll Value, - rhs: &'ll Value -) -> &'ll Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - bx.shl(lhs, rhs) + fn from_const_alloc( + &self, + layout: TyLayout<'tcx>, + alloc: &Allocation, + offset: Size, + ) -> PlaceRef<'tcx, &'ll Value> { + let init = const_alloc_to_llvm(self, alloc); + let base_addr = self.static_addr_of(init, layout.align, None); + + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.static_bitcast(base_addr, self.type_i8p()), + &self.const_usize(offset.bytes()), + 1, + )}; + let llval = self.static_bitcast(llval, self.type_ptr_to(layout.llvm_type(self))); + PlaceRef::new_sized(llval, layout, alloc.align) + } } -pub fn build_unchecked_rshift( - bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - let is_signed = lhs_t.is_signed(); - if is_signed { - bx.ashr(lhs, rhs) - } else { - bx.lshr(lhs, rhs) +pub fn val_ty(v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) } } -fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value { - let rhs_llty = val_ty(rhs); - bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) +pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + } } -pub fn shift_mask_val( - bx: &Builder<'a, 'll, 'tcx>, - llty: &'ll Type, - mask_llty: &'ll Type, - invert: bool -) -> &'ll Value { - let kind = llty.kind(); - match kind { - TypeKind::Integer => { - // i8/u8 can shift by at 
most 7, i16/u16 by at most 15, etc. - let val = llty.int_width() - 1; - if invert { - C_int(mask_llty, !val as i64) - } else { - C_uint(mask_llty, val) - } - }, - TypeKind::Vector => { - let mask = shift_mask_val(bx, llty.element_type(), mask_llty.element_type(), invert); - bx.vector_splat(mask_llty.vector_length(), mask) - }, - _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), +pub fn struct_in_context( + llcx: &'a llvm::Context, + elts: &[&'a Value], + packed: bool, +) -> &'a Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as Bool) } } + +#[inline] +fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { + ((hi as u128) << 64) | (lo as u128) +} diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 9228870bf3a5c2105d84fe298aca420e165524fb..821ac931aac72fa5db9cb81065e7248ed59cc4af 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -11,36 +11,80 @@ use libc::c_uint; use llvm::{self, SetUnnamedAddr, True}; use rustc::hir::def_id::DefId; +use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, + Pointer, ErrorHandled, GlobalId}; use rustc::hir::Node; use debuginfo; -use base; use monomorphize::MonoItem; -use common::{CodegenCx, val_ty}; -use declare; +use common::CodegenCx; use monomorphize::Instance; use syntax_pos::Span; +use rustc_target::abi::HasDataLayout; use syntax_pos::symbol::LocalInternedString; +use base; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; use rustc::ty::{self, Ty}; +use rustc_codegen_ssa::traits::*; -use rustc::ty::layout::{Align, LayoutOf}; +use rustc::ty::layout::{self, Size, Align, LayoutOf}; use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags}; use std::ffi::{CStr, CString}; -pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstPointerCast(val, ty) +pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { + let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); + let dl = cx.data_layout(); + let pointer_size = dl.pointer_size.bytes() as usize; + + let mut next_offset = 0; + for &(offset, ((), alloc_id)) in alloc.relocations.iter() { + let offset = offset.bytes(); + assert_eq!(offset as usize as u64, offset); + let offset = offset as usize; + if offset > next_offset { + llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset])); + } + let ptr_offset = read_target_uint( + dl.endian, + &alloc.bytes[offset..(offset + pointer_size)], + ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; + llvals.push(cx.scalar_to_backend( + Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), + &layout::Scalar { + value: layout::Primitive::Pointer, + valid_range: 0..=!0 + }, + cx.type_i8p() + )); + next_offset = offset + pointer_size; } + if alloc.bytes.len() >= next_offset { + llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..])); + } + + cx.const_struct(&llvals, true) } -pub fn bitcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstBitCast(val, ty) - } +pub fn codegen_static_initializer( + cx: &CodegenCx<'ll, 'tcx>, + def_id: DefId, +) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> { + let instance = ty::Instance::mono(cx.tcx, def_id); + let cid = GlobalId { + instance, + promoted: None, + }; + let param_env = ty::ParamEnv::reveal_all(); + let static_ = cx.tcx.const_eval(param_env.and(cid))?; + + let alloc = match static_.val { + 
ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, + _ => bug!("static const eval returned {:#?}", static_), + }; + Ok((const_alloc_to_llvm(cx, alloc), alloc)) } fn set_global_alignment(cx: &CodegenCx<'ll, '_>, @@ -62,177 +106,6 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>, } } -pub fn addr_of_mut( - cx: &CodegenCx<'ll, '_>, - cv: &'ll Value, - align: Align, - kind: Option<&str>, -) -> &'ll Value { - unsafe { - let gv = match kind { - Some(kind) if !cx.tcx.sess.fewer_names() => { - let name = cx.generate_local_symbol_name(kind); - let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", name); - }); - llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); - gv - }, - _ => declare::define_private_global(cx, val_ty(cv)), - }; - llvm::LLVMSetInitializer(gv, cv); - set_global_alignment(cx, gv, align); - SetUnnamedAddr(gv, true); - gv - } -} - -pub fn addr_of( - cx: &CodegenCx<'ll, '_>, - cv: &'ll Value, - align: Align, - kind: Option<&str>, -) -> &'ll Value { - if let Some(&gv) = cx.const_globals.borrow().get(&cv) { - unsafe { - // Upgrade the alignment in cases where the same constant is used with different - // alignment requirements - let llalign = align.abi() as u32; - if llalign > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, llalign); - } - } - return gv; - } - let gv = addr_of_mut(cx, cv, align, kind); - unsafe { - llvm::LLVMSetGlobalConstant(gv, True); - } - cx.const_globals.borrow_mut().insert(cv, gv); - gv -} - -pub fn get_static(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll Value { - let instance = Instance::mono(cx.tcx, def_id); - if let Some(&g) = cx.instances.borrow().get(&instance) { - return g; - } - - let defined_in_current_codegen_unit = cx.codegen_unit - .items() - .contains_key(&MonoItem::Static(def_id)); - assert!(!defined_in_current_codegen_unit, - "consts::get_static() should always hit the cache for \ - statics defined in the same CGU, but did not for `{:?}`", - def_id); - - let ty = instance.ty(cx.tcx); - let sym = cx.tcx.symbol_name(instance).as_str(); - - debug!("get_static: sym={} instance={:?}", sym, instance); - - let g = if let Some(id) = cx.tcx.hir.as_local_node_id(def_id) { - - let llty = cx.layout_of(ty).llvm_type(cx); - let (g, attrs) = match cx.tcx.hir.get(id) { - Node::Item(&hir::Item { - ref attrs, span, node: hir::ItemKind::Static(..), .. - }) => { - if declare::get_declared_value(cx, &sym[..]).is_some() { - span_bug!(span, "Conflicting symbol names for static?"); - } - - let g = declare::define_global(cx, &sym[..], llty).unwrap(); - - if !cx.tcx.is_reachable_non_generic(def_id) { - unsafe { - llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); - } - } - - (g, attrs) - } - - Node::ForeignItem(&hir::ForeignItem { - ref attrs, span, node: hir::ForeignItemKind::Static(..), .. - }) => { - let fn_attrs = cx.tcx.codegen_fn_attrs(def_id); - (check_and_apply_linkage(cx, &fn_attrs, ty, sym, Some(span)), attrs) - } - - item => bug!("get_static: expected static, found {:?}", item) - }; - - debug!("get_static: sym={} attrs={:?}", sym, attrs); - - for attr in attrs { - if attr.check_name("thread_local") { - llvm::set_thread_local_mode(g, cx.tls_model); - } - } - - g - } else { - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? 
- debug!("get_static: sym={} item_attr={:?}", sym, cx.tcx.item_attrs(def_id)); - - let attrs = cx.tcx.codegen_fn_attrs(def_id); - let g = check_and_apply_linkage(cx, &attrs, ty, sym, None); - - // Thread-local statics in some other crate need to *always* be linked - // against in a thread-local fashion, so we need to be sure to apply the - // thread-local attribute locally if it was present remotely. If we - // don't do this then linker errors can be generated where the linker - // complains that one object files has a thread local version of the - // symbol and another one doesn't. - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - } - - let needs_dll_storage_attr = - cx.use_dll_storage_attrs && !cx.tcx.is_foreign_item(def_id) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); - - // If this assertion triggers, there's something wrong with commandline - // argument validation. - debug_assert!(!(cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - cx.tcx.sess.target.target.options.is_like_msvc && - cx.tcx.sess.opts.cg.prefer_dynamic)); - - if needs_dll_storage_attr { - // This item is external but not foreign, i.e. it originates from an external Rust - // crate. Since we don't know whether this crate will be linked dynamically or - // statically in the final application, we always mark such symbols as 'dllimport'. - // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs to - // make things work. - // - // However, in some scenarios we defer emission of statics to downstream - // crates, so there are cases where a static with an upstream DefId - // is actually present in the current crate. We can find out via the - // is_codegened_item query. - if !cx.tcx.is_codegened_item(def_id) { - unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - } - g - }; - - if cx.use_dll_storage_attrs && cx.tcx.is_dllimport_foreign_item(def_id) { - // For foreign (native) libs we know the exact storage type to use. - unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - - cx.instances.borrow_mut().insert(instance, g); - g -} - fn check_and_apply_linkage( cx: &CodegenCx<'ll, 'tcx>, attrs: &CodegenFnAttrs, @@ -260,7 +133,7 @@ fn check_and_apply_linkage( }; unsafe { // Declare a symbol `foo` with the desired linkage. - let g1 = declare::declare_global(cx, &sym, llty2); + let g1 = cx.declare_global(&sym, llty2); llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which @@ -271,7 +144,7 @@ fn check_and_apply_linkage( // zero. let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(&sym); - let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{ + let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{ if let Some(span) = span { cx.sess().span_fatal( span, @@ -288,150 +161,346 @@ fn check_and_apply_linkage( } else { // Generate an external declaration. 
// FIXME(nagisa): investigate whether it can be changed into define_global - declare::declare_global(cx, &sym, llty) + cx.declare_global(&sym, llty) } } -pub fn codegen_static<'a, 'tcx>( - cx: &CodegenCx<'a, 'tcx>, - def_id: DefId, - is_mutable: bool, -) { +pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { - let attrs = cx.tcx.codegen_fn_attrs(def_id); + llvm::LLVMConstPointerCast(val, ty) + } +} - let (v, alloc) = match ::mir::codegen_static_initializer(cx, def_id) { - Ok(v) => v, - // Error has already been reported - Err(_) => return, - }; +impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> { - let g = get_static(cx, def_id); + fn static_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + ptrcast(val, ty) + } - // boolean SSA values are i1, but they have to be stored in i8 slots, - // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = val_ty(v); - let v = if val_llty == Type::i1(cx) { - val_llty = Type::i8(cx); - llvm::LLVMConstZExt(v, val_llty) - } else { - v - }; + fn static_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstBitCast(val, ty) + } + } + + fn static_addr_of_mut( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + unsafe { + let gv = match kind { + Some(kind) if !self.tcx.sess.fewer_names() => { + let name = self.generate_local_symbol_name(kind); + let gv = self.define_global(&name[..], + self.val_ty(cv)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", name); + }); + llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); + gv + }, + _ => self.define_private_global(self.val_ty(cv)), + }; + llvm::LLVMSetInitializer(gv, cv); + set_global_alignment(&self, gv, align); + SetUnnamedAddr(gv, true); + gv + } + } + + fn static_addr_of( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + if let Some(&gv) = self.const_globals.borrow().get(&cv) { + unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + let llalign = align.abi() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); + } + } + return gv; + } + let gv = self.static_addr_of_mut(cv, align, kind); + unsafe { + llvm::LLVMSetGlobalConstant(gv, True); + } + self.const_globals.borrow_mut().insert(cv, gv); + gv + } + + fn get_static(&self, def_id: DefId) -> &'ll Value { + let instance = Instance::mono(self.tcx, def_id); + if let Some(&g) = self.instances.borrow().get(&instance) { + return g; + } + + let defined_in_current_codegen_unit = self.codegen_unit + .items() + .contains_key(&MonoItem::Static(def_id)); + assert!(!defined_in_current_codegen_unit, + "consts::get_static() should always hit the cache for \ + statics defined in the same CGU, but did not for `{:?}`", + def_id); + + let ty = instance.ty(self.tcx); + let sym = self.tcx.symbol_name(instance).as_str(); + + debug!("get_static: sym={} instance={:?}", sym, instance); + + let g = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { + + let llty = self.layout_of(ty).llvm_type(self); + let (g, attrs) = match self.tcx.hir.get(id) { + Node::Item(&hir::Item { + ref attrs, span, node: hir::ItemKind::Static(..), .. 
+ }) => { + if self.get_declared_value(&sym[..]).is_some() { + span_bug!(span, "Conflicting symbol names for static?"); + } + + let g = self.define_global(&sym[..], llty).unwrap(); + + if !self.tcx.is_reachable_non_generic(def_id) { + unsafe { + llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); + } + } + + (g, attrs) + } + + Node::ForeignItem(&hir::ForeignItem { + ref attrs, span, node: hir::ForeignItemKind::Static(..), .. + }) => { + let fn_attrs = self.tcx.codegen_fn_attrs(def_id); + (check_and_apply_linkage(&self, &fn_attrs, ty, sym, Some(span)), attrs) + } + + item => bug!("get_static: expected static, found {:?}", item) + }; + + debug!("get_static: sym={} attrs={:?}", sym, attrs); + + for attr in attrs { + if attr.check_name("thread_local") { + llvm::set_thread_local_mode(g, self.tls_model); + } + } - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); - let g = if val_llty == llty { g } else { - // If we created the global with the wrong type, - // correct the type. - let empty_string = const_cstr!(""); - let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); - let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); - llvm::LLVMSetValueName(g, empty_string.as_ptr()); - - let linkage = llvm::LLVMRustGetLinkage(g); - let visibility = llvm::LLVMRustGetVisibility(g); - - let new_g = llvm::LLVMRustGetOrInsertGlobal( - cx.llmod, name_string.as_ptr(), val_llty); - - llvm::LLVMRustSetLinkage(new_g, linkage); - llvm::LLVMRustSetVisibility(new_g, visibility); - - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::codegen_backend.) - cx.statics_to_rauw.borrow_mut().push((g, new_g)); - new_g + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id)); + + let attrs = self.tcx.codegen_fn_attrs(def_id); + let g = check_and_apply_linkage(&self, &attrs, ty, sym, None); + + // Thread-local statics in some other crate need to *always* be linked + // against in a thread-local fashion, so we need to be sure to apply the + // thread-local attribute locally if it was present remotely. If we + // don't do this then linker errors can be generated where the linker + // complains that one object files has a thread local version of the + // symbol and another one doesn't. + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + } + + let needs_dll_storage_attr = + self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the attrs. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); + + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!(!(self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + self.tcx.sess.target.target.options.is_like_msvc && + self.tcx.sess.opts.cg.prefer_dynamic)); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e. it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. 
+ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs + // to make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. + if !self.tcx.is_codegened_item(def_id) { + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); + } + } + } + g }; - set_global_alignment(cx, g, cx.align_of(ty)); - llvm::LLVMSetInitializer(g, v); - - // As an optimization, all shared statics which do not have interior - // mutability are placed into read-only memory. - if !is_mutable { - if cx.type_is_freeze(ty) { - llvm::LLVMSetGlobalConstant(g, llvm::True); + + if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { + // For foreign (native) libs we know the exact storage type to use. + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); } } - debuginfo::create_global_var_metadata(cx, def_id, g); - - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - - // Do not allow LLVM to change the alignment of a TLS on macOS. - // - // By default a global's alignment can be freely increased. - // This allows LLVM to generate more performant instructions - // e.g. using load-aligned into a SIMD register. - // - // However, on macOS 10.10 or below, the dynamic linker does not - // respect any alignment given on the TLS (radar 24221680). - // This will violate the alignment assumption, and causing segfault at runtime. - // - // This bug is very easy to trigger. In `println!` and `panic!`, - // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, - // which the values would be `mem::replace`d on initialization. - // The implementation of `mem::replace` will use SIMD - // whenever the size is 32 bytes or higher. LLVM notices SIMD is used - // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, - // which macOS's dyld disregarded and causing crashes - // (see issues #51794, #51758, #50867, #48866 and #44056). - // - // To workaround the bug, we trick LLVM into not increasing - // the global's alignment by explicitly assigning a section to it - // (equivalent to automatically generating a `#[link_section]` attribute). - // See the comment in the `GlobalValue::canIncreaseAlignment()` function - // of `lib/IR/Globals.cpp` for why this works. - // - // When the alignment is not increased, the optimized `mem::replace` - // will use load-unaligned instructions instead, and thus avoiding the crash. - // - // We could remove this hack whenever we decide to drop macOS 10.10 support. 
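// Illustrative sketch, not code from this patch: with the `StaticMethods` impl
// above, code that is generic over the backend can emit statics through the
// trait instead of calling LLVM-specific free functions. The trait bound and
// the way the `(DefId, bool)` pairs are obtained are simplifying assumptions.
use rustc::hir::def_id::DefId;
use rustc_codegen_ssa::traits::*;

fn codegen_statics_sketch<'tcx, Cx: StaticMethods<'tcx>>(cx: &Cx, statics: &[(DefId, bool)]) {
    for &(def_id, is_mutable) in statics {
        // Dispatches to the `codegen_static` implementation shown above when
        // `Cx` is the LLVM `CodegenCx`.
        cx.codegen_static(def_id, is_mutable);
    }
}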
- if cx.tcx.sess.target.target.options.is_like_osx { - let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") - } else { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") - }; - llvm::LLVMSetSection(g, sect_name.as_ptr()); + self.instances.borrow_mut().insert(instance, g); + g + } + + fn codegen_static( + &self, + def_id: DefId, + is_mutable: bool, + ) { + unsafe { + let attrs = self.tcx.codegen_fn_attrs(def_id); + + let (v, alloc) = match codegen_static_initializer(&self, def_id) { + Ok(v) => v, + // Error has already been reported + Err(_) => return, + }; + + let g = self.get_static(def_id); + + // boolean SSA values are i1, but they have to be stored in i8 slots, + // otherwise some LLVM optimization passes don't work as expected + let mut val_llty = self.val_ty(v); + let v = if val_llty == self.type_i1() { + val_llty = self.type_i8(); + llvm::LLVMConstZExt(v, val_llty) + } else { + v + }; + + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); + let g = if val_llty == llty { + g + } else { + // If we created the global with the wrong type, + // correct the type. + let empty_string = const_cstr!(""); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); + let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); + llvm::LLVMSetValueName(g, empty_string.as_ptr()); + + let linkage = llvm::LLVMRustGetLinkage(g); + let visibility = llvm::LLVMRustGetVisibility(g); + + let new_g = llvm::LLVMRustGetOrInsertGlobal( + self.llmod, name_string.as_ptr(), val_llty); + + llvm::LLVMRustSetLinkage(new_g, linkage); + llvm::LLVMRustSetVisibility(new_g, visibility); + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) + self.statics_to_rauw.borrow_mut().push((g, new_g)); + new_g + }; + set_global_alignment(&self, g, self.align_of(ty)); + llvm::LLVMSetInitializer(g, v); + + // As an optimization, all shared statics which do not have interior + // mutability are placed into read-only memory. + if !is_mutable { + if self.type_is_freeze(ty) { + llvm::LLVMSetGlobalConstant(g, llvm::True); + } + } + + debuginfo::create_global_var_metadata(&self, def_id, g); + + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + + // Do not allow LLVM to change the alignment of a TLS on macOS. + // + // By default a global's alignment can be freely increased. + // This allows LLVM to generate more performant instructions + // e.g. using load-aligned into a SIMD register. + // + // However, on macOS 10.10 or below, the dynamic linker does not + // respect any alignment given on the TLS (radar 24221680). + // This will violate the alignment assumption, and causing segfault at runtime. + // + // This bug is very easy to trigger. In `println!` and `panic!`, + // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, + // which the values would be `mem::replace`d on initialization. + // The implementation of `mem::replace` will use SIMD + // whenever the size is 32 bytes or higher. LLVM notices SIMD is used + // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, + // which macOS's dyld disregarded and causing crashes + // (see issues #51794, #51758, #50867, #48866 and #44056). 
+ // + // To workaround the bug, we trick LLVM into not increasing + // the global's alignment by explicitly assigning a section to it + // (equivalent to automatically generating a `#[link_section]` attribute). + // See the comment in the `GlobalValue::canIncreaseAlignment()` function + // of `lib/IR/Globals.cpp` for why this works. + // + // When the alignment is not increased, the optimized `mem::replace` + // will use load-unaligned instructions instead, and thus avoiding the crash. + // + // We could remove this hack whenever we decide to drop macOS 10.10 support. + if self.tcx.sess.target.target.options.is_like_osx { + let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") + } else { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") + }; + llvm::LLVMSetSection(g, sect_name.as_ptr()); + } } - } - // Wasm statics with custom link sections get special treatment as they - // go into custom sections of the wasm executable. - if cx.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { - if let Some(section) = attrs.link_section { - let section = llvm::LLVMMDStringInContext( - cx.llcx, - section.as_str().as_ptr() as *const _, - section.as_str().len() as c_uint, - ); - let alloc = llvm::LLVMMDStringInContext( - cx.llcx, - alloc.bytes.as_ptr() as *const _, - alloc.bytes.len() as c_uint, - ); - let data = [section, alloc]; - let meta = llvm::LLVMMDNodeInContext(cx.llcx, data.as_ptr(), 2); - llvm::LLVMAddNamedMetadataOperand( - cx.llmod, - "wasm.custom_sections\0".as_ptr() as *const _, - meta, - ); + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. + if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if let Some(section) = attrs.link_section { + let section = llvm::LLVMMDStringInContext( + self.llcx, + section.as_str().as_ptr() as *const _, + section.as_str().len() as c_uint, + ); + let alloc = llvm::LLVMMDStringInContext( + self.llcx, + alloc.bytes.as_ptr() as *const _, + alloc.bytes.len() as c_uint, + ); + let data = [section, alloc]; + let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2); + llvm::LLVMAddNamedMetadataOperand( + self.llmod, + "wasm.custom_sections\0".as_ptr() as *const _, + meta, + ); + } + } else { + base::set_link_section(g, &attrs); } - } else { - base::set_link_section(g, &attrs); - } - if attrs.flags.contains(CodegenFnAttrFlags::USED) { - // This static will be stored in the llvm.used variable which is an array of i8* - let cast = llvm::LLVMConstPointerCast(g, Type::i8p(cx)); - cx.used_statics.borrow_mut().push(cast); + if attrs.flags.contains(CodegenFnAttrFlags::USED) { + // This static will be stored in the llvm.used variable which is an array of i8* + let cast = llvm::LLVMConstPointerCast(g, self.type_i8p()); + self.used_statics.borrow_mut().push(cast); + } } } + unsafe fn static_replace_all_uses(&self, old_g: &'ll Value, new_g: &'ll Value) { + let bitcast = llvm::LLVMConstPointerCast(new_g, self.val_ty(old_g)); + llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMDeleteGlobal(old_g); + } } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index d6fd069071548cf2b259479923d44417ba215893..5b088ad290810c02f4bbb617737925020a884e39 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -9,20 +9,19 @@ // except according to those terms. 
use attributes; -use common; use llvm; +use llvm_util; use rustc::dep_graph::DepGraphSafe; use rustc::hir; use debuginfo; -use callee; -use base; -use declare; use monomorphize::Instance; use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; +use rustc_codegen_ssa::traits::*; +use libc::c_uint; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -33,6 +32,9 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_target::spec::{HasTargetSpec, Target}; +use rustc_codegen_ssa::callee::resolve_and_get_fn; +use rustc_codegen_ssa::base::wants_msvc_seh; +use callee::get_fn; use std::ffi::CStr; use std::cell::{Cell, RefCell}; @@ -45,24 +47,23 @@ /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM /// `llvm::Context` so that several compilation units may be optimized in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. -pub struct CodegenCx<'a, 'tcx: 'a> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub struct CodegenCx<'ll, 'tcx: 'll> { + pub tcx: TyCtxt<'ll, 'tcx, 'tcx>, pub check_overflow: bool, pub use_dll_storage_attrs: bool, pub tls_model: llvm::ThreadLocalMode, - pub llmod: &'a llvm::Module, - pub llcx: &'a llvm::Context, + pub llmod: &'ll llvm::Module, + pub llcx: &'ll llvm::Context, pub stats: RefCell, pub codegen_unit: Arc>, /// Cache instances of monomorphic and polymorphic items - pub instances: RefCell, &'a Value>>, + pub instances: RefCell, &'ll Value>>, /// Cache generated vtables - pub vtables: RefCell, ty::PolyExistentialTraitRef<'tcx>), - &'a Value>>, + pub vtables: RefCell, ty::PolyExistentialTraitRef<'tcx>), &'ll Value>>, /// Cache of constant strings, - pub const_cstr_cache: RefCell>, + pub const_cstr_cache: RefCell>, /// Reverse-direction for const ptrs cast from globals. /// Key is a Value holding a *T, @@ -72,40 +73,39 @@ pub struct CodegenCx<'a, 'tcx: 'a> { /// when we ptrcast, and we have to ptrcast during codegen /// of a [T] const because we form a slice, a (*T,usize) pair, not /// a pointer to an LLVM array type. Similar for trait objects. - pub const_unsized: RefCell>, + pub const_unsized: RefCell>, /// Cache of emitted const globals (value -> global) - pub const_globals: RefCell>, + pub const_globals: RefCell>, /// List of globals for static variables which need to be passed to the /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete. /// (We have to make sure we don't invalidate any Values referring /// to constants.) 
- pub statics_to_rauw: RefCell>, + pub statics_to_rauw: RefCell>, /// Statics that will be placed in the llvm.used variable /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details - pub used_statics: RefCell>, + pub used_statics: RefCell>, - pub lltypes: RefCell, Option), &'a Type>>, - pub scalar_lltypes: RefCell, &'a Type>>, + pub lltypes: RefCell, Option), &'ll Type>>, + pub scalar_lltypes: RefCell, &'ll Type>>, pub pointee_infos: RefCell, Size), Option>>, - pub isize_ty: &'a Type, + pub isize_ty: &'ll Type, - pub dbg_cx: Option>, + pub dbg_cx: Option>, - eh_personality: Cell>, - eh_unwind_resume: Cell>, - pub rust_try_fn: Cell>, + eh_personality: Cell>, + eh_unwind_resume: Cell>, + pub rust_try_fn: Cell>, - intrinsics: RefCell>, + intrinsics: RefCell>, /// A counter that is used for generating local symbol names local_gen_sym_counter: Cell, } -impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx> { -} +impl<'ll, 'tcx> DepGraphSafe for CodegenCx<'ll, 'tcx> {} pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { let reloc_model_arg = match sess.opts.cg.relocation_model { @@ -218,11 +218,11 @@ pub unsafe fn create_module( llmod } -impl<'a, 'tcx> CodegenCx<'a, 'tcx> { - crate fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, +impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { + crate fn new(tcx: TyCtxt<'ll, 'tcx, 'tcx>, codegen_unit: Arc>, - llvm_module: &'a ::ModuleLlvm) - -> CodegenCx<'a, 'tcx> { + llvm_module: &'ll ::ModuleLlvm) + -> Self { // An interesting part of Windows which MSVC forces our hand on (and // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` // attributes in LLVM IR as well as native dependencies (in C these @@ -316,34 +316,26 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> { } } -impl<'b, 'tcx> CodegenCx<'b, 'tcx> { - pub fn sess<'a>(&'a self) -> &'a Session { - &self.tcx.sess +impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn vtables(&self) -> &RefCell, + ty::PolyExistentialTraitRef<'tcx>), &'ll Value>> + { + &self.vtables } - pub fn get_intrinsic(&self, key: &str) -> &'b Value { - if let Some(v) = self.intrinsics.borrow().get(key).cloned() { - return v; - } + fn instances(&self) -> &RefCell, &'ll Value>> { + &self.instances + } - declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) + fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value { + get_fn(&&self,instance) } - /// Generate a new symbol name with the given prefix. This symbol name must - /// only be used for definitions with `internal` or `private` linkage. - pub fn generate_local_symbol_name(&self, prefix: &str) -> String { - let idx = self.local_gen_sym_counter.get(); - self.local_gen_sym_counter.set(idx + 1); - // Include a '.' character, so there can be no accidental conflicts with - // user defined names - let mut name = String::with_capacity(prefix.len() + 6); - name.push_str(prefix); - name.push_str("."); - base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); - name + fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value { + llvm::get_param(llfn, index) } - pub fn eh_personality(&self) -> &'b Value { + fn eh_personality(&self) -> &'ll Value { // The exception handling personality function. 
// // If our compilation unit has the `eh_personality` lang item somewhere @@ -369,17 +361,17 @@ pub fn eh_personality(&self) -> &'b Value { } let tcx = self.tcx; let llfn = match tcx.lang_items().eh_personality() { - Some(def_id) if !base::wants_msvc_seh(self.sess()) => { - callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) + Some(def_id) if !wants_msvc_seh(self.sess()) => { + resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])) } _ => { - let name = if base::wants_msvc_seh(self.sess()) { + let name = if wants_msvc_seh(self.sess()) { "__CxxFrameHandler3" } else { "rust_eh_personality" }; - let fty = Type::variadic_func(&[], Type::i32(self)); - declare::declare_cfn(self, name, fty) + let fty = self.type_variadic_func(&[], self.type_i32()); + self.declare_cfn(name, fty) } }; attributes::apply_target_cpu_attr(self, llfn); @@ -389,7 +381,7 @@ pub fn eh_personality(&self) -> &'b Value { // Returns a Value of the "eh_unwind_resume" lang item if one is defined, // otherwise declares it as an external function. - pub fn eh_unwind_resume(&self) -> &'b Value { + fn eh_unwind_resume(&self) -> &'ll Value { use attributes; let unwresume = &self.eh_unwind_resume; if let Some(llfn) = unwresume.get() { @@ -399,7 +391,7 @@ pub fn eh_unwind_resume(&self) -> &'b Value { let tcx = self.tcx; assert!(self.sess().target.target.options.custom_unwind_resume); if let Some(def_id) = tcx.lang_items().eh_unwind_resume() { - let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); + let llfn = resolve_and_get_fn(self, def_id, tcx.intern_substs(&[])); unwresume.set(Some(llfn)); return llfn; } @@ -412,37 +404,406 @@ pub fn eh_unwind_resume(&self) -> &'b Value { Abi::C )); - let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", sig); + let llfn = self.declare_fn("rust_eh_unwind_resume", sig); attributes::unwind(llfn, true); attributes::apply_target_cpu_attr(self, llfn); unwresume.set(Some(llfn)); llfn } - pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - common::type_needs_drop(self.tcx, ty) + fn sess(&self) -> &Session { + &self.tcx.sess + } + + fn check_overflow(&self) -> bool { + self.check_overflow + } + + fn stats(&self) -> &RefCell { + &self.stats + } + + fn consume_stats(self) -> RefCell { + self.stats } - pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - common::type_is_sized(self.tcx, ty) + fn codegen_unit(&self) -> &Arc> { + &self.codegen_unit } - pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - common::type_is_freeze(self.tcx, ty) + fn statics_to_rauw(&self) -> &RefCell> { + &self.statics_to_rauw } - pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { - use syntax_pos::DUMMY_SP; - if ty.is_sized(self.tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) { - return false; + fn used_statics(&self) -> &RefCell> { + &self.used_statics + } + + fn set_frame_pointer_elimination(&self, llfn: &'ll Value) { + attributes::set_frame_pointer_elimination(self, llfn) + } + + fn apply_target_cpu_attr(&self, llfn: &'ll Value) { + attributes::apply_target_cpu_attr(self, llfn) + } + + fn closure_env_needs_indirect_debuginfo(&self) -> bool { + llvm_util::get_major_version() < 6 + } + + fn create_used_variable(&self) { + let name = const_cstr!("llvm.used"); + let section = const_cstr!("llvm.metadata"); + let array = self.const_array( + &self.type_ptr_to(self.type_i8()), + &*self.used_statics.borrow() + ); + + unsafe { + let g = llvm::LLVMAddGlobal(self.llmod, + self.val_ty(array), + name.as_ptr()); + llvm::LLVMSetInitializer(g, array); + llvm::LLVMRustSetLinkage(g, 
llvm::Linkage::AppendingLinkage); + llvm::LLVMSetSection(g, section.as_ptr()); } + } +} + +impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> { + fn get_intrinsic(&self, key: &str) -> &'b Value { + if let Some(v) = self.intrinsics.borrow().get(key).cloned() { + return v; + } + + self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) + } + + fn declare_intrinsic( + &self, + key: &str + ) -> Option<&'b Value> { + macro_rules! ifn { + ($name:expr, fn() -> $ret:expr) => ( + if key == $name { + let f = self.declare_cfn($name, self.type_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn(...) -> $ret:expr) => ( + if key == $name { + let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( + if key == $name { + let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret)); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + } + macro_rules! mk_struct { + ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false)) + } + + let i8p = self.type_i8p(); + let void = self.type_void(); + let i1 = self.type_i1(); + let t_i8 = self.type_i8(); + let t_i16 = self.type_i16(); + let t_i32 = self.type_i32(); + let t_i64 = self.type_i64(); + let t_i128 = self.type_i128(); + let t_f32 = self.type_f32(); + let t_f64 = self.type_f64(); + + let t_v2f32 = self.type_vector(t_f32, 2); + let t_v4f32 = self.type_vector(t_f32, 4); + let t_v8f32 = self.type_vector(t_f32, 8); + let t_v16f32 = self.type_vector(t_f32, 16); + + let t_v2f64 = self.type_vector(t_f64, 2); + let t_v4f64 = self.type_vector(t_f64, 4); + let t_v8f64 = self.type_vector(t_f64, 8); + + ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); + + ifn!("llvm.trap", fn() -> void); + ifn!("llvm.debugtrap", fn() -> void); + ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + + ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); + ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); + ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); + ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); + ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); + ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); + ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); + ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); + ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); + + ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); + 
ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); + ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); + ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); + ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.fma.v4f32", fn(t_v4f32, 
t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); + ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); + ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); + ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); + ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); + ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); + ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); + ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); + ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.round.f32", fn(t_f32) -> t_f32); + ifn!("llvm.round.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); + ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); + ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); + ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); + ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); + ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); + ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8); + ifn!("llvm.fshl.i16", fn(t_i16, 
t_i16, t_i16) -> t_i16); + ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32); + ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64); + ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128); - let tail = self.tcx.struct_tail(ty); - match tail.sty { - ty::Foreign(..) => false, - ty::Str | ty::Slice(..) | ty::Dynamic(..) => true, - _ => bug!("unexpected unsized tail: {:?}", tail.sty), + ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8); + ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16); + ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32); + ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64); + ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128); + + ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); + ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); + + ifn!("llvm.expect.i1", fn(i1, i1) -> i1); + ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.localescape", fn(...) 
-> void); + ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); + ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + + ifn!("llvm.assume", fn(i1) -> void); + ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + + if self.sess().opts.debuginfo != DebugInfo::None { + ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void); + ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void); } + return None; + } +} + +impl<'b, 'tcx> CodegenCx<'b, 'tcx> { + /// Generate a new symbol name with the given prefix. This symbol name must + /// only be used for definitions with `internal` or `private` linkage. + pub fn generate_local_symbol_name(&self, prefix: &str) -> String { + let idx = self.local_gen_sym_counter.get(); + self.local_gen_sym_counter.set(idx + 1); + // Include a '.' character, so there can be no accidental conflicts with + // user defined names + let mut name = String::with_capacity(prefix.len() + 6); + name.push_str(prefix); + name.push_str("."); + base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); + name } } @@ -477,313 +838,3 @@ fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { }) } } - -/// Declare any llvm intrinsics that you might need -fn declare_intrinsic(cx: &CodegenCx<'ll, '_>, key: &str) -> Option<&'ll Value> { - macro_rules! ifn { - ($name:expr, fn() -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn(...) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - } - macro_rules! 
mk_struct { - ($($field_ty:expr),*) => (Type::struct_(cx, &[$($field_ty),*], false)) - } - - let i8p = Type::i8p(cx); - let void = Type::void(cx); - let i1 = Type::i1(cx); - let t_i8 = Type::i8(cx); - let t_i16 = Type::i16(cx); - let t_i32 = Type::i32(cx); - let t_i64 = Type::i64(cx); - let t_i128 = Type::i128(cx); - let t_f32 = Type::f32(cx); - let t_f64 = Type::f64(cx); - - let t_v2f32 = Type::vector(t_f32, 2); - let t_v4f32 = Type::vector(t_f32, 4); - let t_v8f32 = Type::vector(t_f32, 8); - let t_v16f32 = Type::vector(t_f32, 16); - - let t_v2f64 = Type::vector(t_f64, 2); - let t_v4f64 = Type::vector(t_f64, 4); - let t_v8f64 = Type::vector(t_f64, 8); - - ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); - - ifn!("llvm.trap", fn() -> void); - ifn!("llvm.debugtrap", fn() -> void); - ifn!("llvm.frameaddress", fn(t_i32) -> i8p); - - ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); - ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); - ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); - ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); - ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); - ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); - ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); - ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); - ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); - - ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); - ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); - ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.exp.v8f32", 
fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); - ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); - ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); - ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); - ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); - ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); - ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); - 
ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); - ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); - ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); - ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.round.f32", fn(t_f32) -> t_f32); - ifn!("llvm.round.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); - ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); - ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); - ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); - ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); - ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); - ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8); - ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16); - ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32); - ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64); - ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128); - - ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8); - ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16); - ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32); - ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64); - ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128); - - ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, 
t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); - ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); - - ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); - ifn!("llvm.localescape", fn(...) -> void); - ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); - ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); - - ifn!("llvm.assume", fn(i1) -> void); - ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); - - if cx.sess().opts.debuginfo != DebugInfo::None { - ifn!("llvm.dbg.declare", fn(Type::metadata(cx), Type::metadata(cx)) -> void); - ifn!("llvm.dbg.value", fn(Type::metadata(cx), t_i64, Type::metadata(cx)) -> void); - } - - None -} diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index 56352ae963f20ab4d1febe92224d51cf80767c5e..c18e126e52003960fe1412be3bf8ef479282e378 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -8,12 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::{FunctionDebugContext, FunctionDebugContextData}; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, FunctionDebugContextData, MirDebugScope}; use super::metadata::file_metadata; use super::utils::{DIB, span_start}; use llvm; -use llvm::debuginfo::DIScope; +use llvm::debuginfo::{DIScope, DISubprogram}; use common::CodegenCx; use rustc::mir::{Mir, SourceScope}; @@ -26,28 +26,13 @@ use syntax_pos::BytePos; -#[derive(Clone, Copy, Debug)] -pub struct MirDebugScope<'ll> { - pub scope_metadata: Option<&'ll DIScope>, - // Start and end offsets of the file to which this DIScope belongs. - // These are used to quickly determine whether some span refers to the same file. 
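// Sketch of the shape the relocated type presumably takes in
// rustc_codegen_ssa::debuginfo (this hunk only shows the removal side): the
// LLVM-specific `&'ll DIScope` handle becomes a type parameter so other
// backends can reuse the scope map. The generic form is an assumption; the
// field names follow the removed definition.
use syntax_pos::BytePos;

#[derive(Clone, Copy, Debug)]
pub struct MirDebugScope<D> {
    pub scope_metadata: Option<D>,
    // Start and end offsets of the file this scope belongs to, used to quickly
    // decide whether some span refers to the same file.
    pub file_start_pos: BytePos,
    pub file_end_pos: BytePos,
}

impl<D> MirDebugScope<D> {
    pub fn is_valid(&self) -> bool {
        self.scope_metadata.is_some()
    }
}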
- pub file_start_pos: BytePos, - pub file_end_pos: BytePos, -} - -impl MirDebugScope<'ll> { - pub fn is_valid(&self) -> bool { - self.scope_metadata.is_some() - } -} - /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. pub fn create_mir_scopes( cx: &CodegenCx<'ll, '_>, mir: &Mir, - debug_context: &FunctionDebugContext<'ll>, -) -> IndexVec> { + debug_context: &FunctionDebugContext<&'ll DISubprogram>, +) -> IndexVec> { let null_scope = MirDebugScope { scope_metadata: None, file_start_pos: BytePos(0), @@ -82,9 +67,9 @@ pub fn create_mir_scopes( fn make_mir_scope(cx: &CodegenCx<'ll, '_>, mir: &Mir, has_variables: &BitSet, - debug_context: &FunctionDebugContextData<'ll>, + debug_context: &FunctionDebugContextData<&'ll DISubprogram>, scope: SourceScope, - scopes: &mut IndexVec>) { + scopes: &mut IndexVec>) { if scopes[scope].is_valid() { return; } diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index f6faddb894ffdf41c532659fffc92afea806f412..0046a07236673b07f876aa87e6ca3cfd116bfa34 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -12,24 +12,23 @@ use llvm; -use common::{C_bytes, CodegenCx, C_i32}; +use common::CodegenCx; use builder::Builder; -use declare; use rustc::session::config::DebugInfo; -use type_::Type; use value::Value; +use rustc_codegen_ssa::traits::*; use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) { - if needs_gdb_debug_scripts_section(bx.cx) { - let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx); +pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) { + if needs_gdb_debug_scripts_section(bx.cx()) { + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. 
- let indices = [C_i32(bx.cx, 0), C_i32(bx.cx, 0)]; + let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { @@ -55,15 +54,15 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = Type::array(Type::i8(cx), + let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64); - let section_var = declare::define_global(cx, section_var_name, + let section_var = cx.define_global(section_var_name, llvm_type).unwrap_or_else(||{ bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); - llvm::LLVMSetInitializer(section_var, C_bytes(cx, section_contents)); + llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 97a3ae9c9faf4e534956892826d4026d241bbed0..1c787a969324b6c6ac4d04d4a53bc9a00ac853db 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -17,6 +17,7 @@ use super::namespace::mangled_name_of_instance; use super::type_names::compute_debuginfo_type_name; use super::{CrateDebugContext}; +use rustc_codegen_ssa::traits::*; use abi; use value::Value; @@ -32,7 +33,7 @@ use rustc::ich::NodeIdHashingMode; use rustc_data_structures::fingerprint::Fingerprint; use rustc::ty::Instance; -use common::{CodegenCx, C_u64}; +use common::CodegenCx; use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt}; use rustc::ty::layout::{self, Align, HasDataLayout, Integer, IntegerExt, LayoutOf, PrimitiveExt, Size, TyLayout}; @@ -1810,7 +1811,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, member_description.offset.bits(), match member_description.discriminant { None => None, - Some(value) => Some(C_u64(cx, value)), + Some(value) => Some(cx.const_u64(value)), }, member_description.flags, member_description.type_metadata)) @@ -1966,22 +1967,6 @@ pub fn create_global_var_metadata( } } -// Creates an "extension" of an existing DIScope into another file. -pub fn extend_scope_to_file( - cx: &CodegenCx<'ll, '_>, - scope_metadata: &'ll DIScope, - file: &syntax_pos::SourceFile, - defining_crate: CrateNum, -) -> &'ll DILexicalBlock { - let file_metadata = file_metadata(cx, &file.name, defining_crate); - unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlockFile( - DIB(cx), - scope_metadata, - file_metadata) - } -} - /// Creates debug information for the given vtable, which is for the /// given type. /// @@ -2037,3 +2022,19 @@ pub fn create_vtable_metadata( 0); } } + +// Creates an "extension" of an existing DIScope into another file. 
+pub fn extend_scope_to_file( + cx: &CodegenCx<'ll, '_>, + scope_metadata: &'ll DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, +) -> &'ll DILexicalBlock { + let file_metadata = file_metadata(cx, &file.name, defining_crate); + unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlockFile( + DIB(cx), + scope_metadata, + file_metadata) + } +} diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 042e72e921ecead4e02951a41c7f3747db5e0966..9784cc6cf9c80054f0126da502405bea656b12a8 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -11,8 +11,8 @@ // See doc.rs for documentation. mod doc; -use self::VariableAccess::*; -use self::VariableKind::*; +use rustc_codegen_ssa::debuginfo::VariableAccess::*; +use rustc_codegen_ssa::debuginfo::VariableKind::*; use self::utils::{DIB, span_start, create_DIArray, is_node_local_to_unit}; use self::namespace::mangled_name_of_instance; @@ -21,7 +21,8 @@ use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; use llvm; -use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags}; +use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags, + DILexicalBlock}; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def_id::{DefId, CrateNum}; use rustc::ty::subst::{Substs, UnpackedKind}; @@ -35,7 +36,10 @@ use rustc::session::config::{self, DebugInfo}; use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_data_structures::indexed_vec::IndexVec; use value::Value; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, + VariableKind, FunctionDebugContextData}; use libc::c_uint; use std::cell::{Cell, RefCell}; @@ -44,7 +48,8 @@ use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; +use rustc_codegen_ssa::traits::*; pub mod gdb; mod utils; @@ -54,10 +59,8 @@ mod create_scope_map; mod source_loc; -pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; -pub use self::source_loc::start_emitting_source_locations; +pub use self::create_scope_map::{create_mir_scopes}; pub use self::metadata::create_global_var_metadata; -pub use self::metadata::create_vtable_metadata; pub use self::metadata::extend_scope_to_file; pub use self::source_loc::set_source_location; @@ -109,54 +112,6 @@ pub fn new(llmod: &'a llvm::Module) -> Self { } } -pub enum FunctionDebugContext<'ll> { - RegularContext(FunctionDebugContextData<'ll>), - DebugInfoDisabled, - FunctionWithoutDebugInfo, -} - -impl FunctionDebugContext<'ll> { - pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<'ll> { - match *self { - FunctionDebugContext::RegularContext(ref data) => data, - FunctionDebugContext::DebugInfoDisabled => { - span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message()); - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message()); - } - } - } - - fn debuginfo_disabled_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" - } - - fn should_be_ignored_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext for function that should be \ - ignored by debug info!" 
- } -} - -pub struct FunctionDebugContextData<'ll> { - fn_metadata: &'ll DISubprogram, - source_locations_enabled: Cell, - pub defining_crate: CrateNum, -} - -pub enum VariableAccess<'a, 'll> { - // The llptr given is an alloca containing the variable's value - DirectVariable { alloca: &'ll Value }, - // The llptr given is an alloca containing the start of some pointer chain - // leading to the variable's content. - IndirectVariable { alloca: &'ll Value, address_operations: &'a [i64] } -} - -pub enum VariableKind { - ArgumentVariable(usize /*index*/), - LocalVariable, -} - /// Create any deferred debug metadata nodes pub fn finalize(cx: &CodegenCx) { if cx.dbg_cx.is_none() { @@ -202,348 +157,398 @@ pub fn finalize(cx: &CodegenCx) { }; } -/// Creates the function-specific debug context. -/// -/// Returns the FunctionDebugContext for the function which holds state needed -/// for debug info creation. The function may also return another variant of the -/// FunctionDebugContext enum which indicates why no debuginfo should be created -/// for the function. -pub fn create_function_debug_context( - cx: &CodegenCx<'ll, 'tcx>, - instance: Instance<'tcx>, - sig: ty::FnSig<'tcx>, - llfn: &'ll Value, - mir: &mir::Mir, -) -> FunctionDebugContext<'ll> { - if cx.sess().opts.debuginfo == DebugInfo::None { - return FunctionDebugContext::DebugInfoDisabled; +impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn declare_local( + &mut self, + dbg_context: &FunctionDebugContext<&'ll DISubprogram>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: &'ll DIScope, + variable_access: VariableAccess<'_, &'ll Value>, + variable_kind: VariableKind, + span: Span, + ) { + assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + let cx = self.cx(); + + let file = span_start(cx, span).file; + let file_metadata = file_metadata(cx, + &file.name, + dbg_context.get_ref(span).defining_crate); + + let loc = span_start(cx, span); + let type_metadata = type_metadata(cx, variable_type, span); + + let (argument_index, dwarf_tag) = match variable_kind { + ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), + LocalVariable => (0, DW_TAG_auto_variable) + }; + let align = cx.align_of(variable_type); + + let name = SmallCStr::new(&variable_name.as_str()); + match (variable_access, &[][..]) { + (DirectVariable { alloca }, address_operations) | + (IndirectVariable {alloca, address_operations}, _) => { + let metadata = unsafe { + llvm::LLVMRustDIBuilderCreateVariable( + DIB(cx), + dwarf_tag, + scope_metadata, + name.as_ptr(), + file_metadata, + loc.line as c_uint, + type_metadata, + cx.sess().opts.optimize != config::OptLevel::No, + DIFlags::FlagZero, + argument_index, + align.abi() as u32, + ) + }; + source_loc::set_debug_location(self, + InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); + unsafe { + let debug_loc = llvm::LLVMGetCurrentDebugLocation(self.llbuilder); + let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( + DIB(cx), + alloca, + metadata, + address_operations.as_ptr(), + address_operations.len() as c_uint, + debug_loc, + self.llbb()); + + llvm::LLVMSetInstDebugLocation(self.llbuilder, instr); + } + source_loc::set_debug_location(self, UnknownLocation); + } + } + } + + fn set_source_location( + &mut self, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, + scope: Option<&'ll DIScope>, + span: Span, + ) { + set_source_location(debug_context, &self, scope, span) } + fn insert_reference_to_gdb_debug_scripts_section_global(&mut 
self) { + gdb::insert_reference_to_gdb_debug_scripts_section_global(self) + } +} - if let InstanceDef::Item(def_id) = instance.def { - if cx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { - return FunctionDebugContext::FunctionWithoutDebugInfo; +impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, + llfn: &'ll Value, + mir: &mir::Mir, + ) -> FunctionDebugContext<&'ll DISubprogram> { + if self.sess().opts.debuginfo == DebugInfo::None { + return FunctionDebugContext::DebugInfoDisabled; + } + + if let InstanceDef::Item(def_id) = instance.def { + if self.tcx().codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } } - } - let span = mir.span; + let span = mir.span; - // This can be the case for functions inlined from another crate - if span.is_dummy() { - // FIXME(simulacrum): Probably can't happen; remove. - return FunctionDebugContext::FunctionWithoutDebugInfo; - } + // This can be the case for functions inlined from another crate + if span.is_dummy() { + // FIXME(simulacrum): Probably can't happen; remove. + return FunctionDebugContext::FunctionWithoutDebugInfo; + } - let def_id = instance.def_id(); - let containing_scope = get_containing_scope(cx, instance); - let loc = span_start(cx, span); - let file_metadata = file_metadata(cx, &loc.file.name, def_id.krate); + let def_id = instance.def_id(); + let containing_scope = get_containing_scope(self, instance); + let loc = span_start(self, span); + let file_metadata = file_metadata(self, &loc.file.name, def_id.krate); - let function_type_metadata = unsafe { - let fn_signature = get_function_signature(cx, sig); - llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature) - }; + let function_type_metadata = unsafe { + let fn_signature = get_function_signature(self, sig); + llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), file_metadata, fn_signature) + }; - // Find the enclosing function, in case this is a closure. - let def_key = cx.tcx.def_key(def_id); - let mut name = def_key.disambiguated_data.data.to_string(); + // Find the enclosing function, in case this is a closure. + let def_key = self.tcx().def_key(def_id); + let mut name = def_key.disambiguated_data.data.to_string(); - let enclosing_fn_def_id = cx.tcx.closure_base_def_id(def_id); + let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id); - // Get_template_parameters() will append a `<...>` clause to the function - // name if necessary. - let generics = cx.tcx.generics_of(enclosing_fn_def_id); - let substs = instance.substs.truncate_to(cx.tcx, generics); - let template_parameters = get_template_parameters(cx, - &generics, - substs, - file_metadata, - &mut name); + // Get_template_parameters() will append a `<...>` clause to the function + // name if necessary. 
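Aside on the trait impls above: once `declare_local` and `set_source_location` hang off `DebugInfoBuilderMethods`, the backend-independent MIR lowering in `rustc_codegen_ssa` can emit variable debuginfo without naming any LLVM type. A rough illustration of the calling side, written against a simplified trait shape; the helper, its bounds, and the single `DIScope` associated type are assumptions for illustration only (the real call sites live in `rustc_codegen_ssa::mir`, and the real trait distinguishes scope and subprogram types as the impl above shows).

```rust
use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, VariableAccess, VariableKind};
use rustc_codegen_ssa::traits::*;
use rustc::ty::Ty;
use syntax::ast;
use syntax_pos::Span;

// Illustrative only: declare debuginfo for a single argument local through
// the builder trait, without touching LLVM types directly.
fn declare_one_arg<'tcx, Bx: DebugInfoBuilderMethods<'tcx>>(
    bx: &mut Bx,
    dbg: &FunctionDebugContext<Bx::DIScope>,
    name: ast::Name,
    ty: Ty<'tcx>,
    scope: Bx::DIScope,
    alloca: Bx::Value,
    arg_index: usize,
    span: Span,
) {
    bx.declare_local(
        dbg,
        name,
        ty,
        scope,
        // The value lives directly in an alloca, so no pointer chain is needed.
        VariableAccess::DirectVariable { alloca },
        VariableKind::ArgumentVariable(arg_index),
        span,
    );
}
```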
+ let generics = self.tcx().generics_of(enclosing_fn_def_id); + let substs = instance.substs.truncate_to(self.tcx(), generics); + let template_parameters = get_template_parameters(self, + &generics, + substs, + file_metadata, + &mut name); - // Get the linkage_name, which is just the symbol name - let linkage_name = mangled_name_of_instance(cx, instance); + // Get the linkage_name, which is just the symbol name + let linkage_name = mangled_name_of_instance(self, instance); - let scope_line = span_start(cx, span).line; - let is_local_to_unit = is_node_local_to_unit(cx, def_id); + let scope_line = span_start(self, span).line; + let is_local_to_unit = is_node_local_to_unit(self, def_id); - let function_name = CString::new(name).unwrap(); - let linkage_name = SmallCStr::new(&linkage_name.as_str()); + let function_name = CString::new(name).unwrap(); + let linkage_name = SmallCStr::new(&linkage_name.as_str()); - let mut flags = DIFlags::FlagPrototyped; + let mut flags = DIFlags::FlagPrototyped; - let local_id = cx.tcx.hir.as_local_node_id(def_id); - if let Some((id, _, _)) = *cx.sess().entry_fn.borrow() { - if local_id == Some(id) { - flags |= DIFlags::FlagMainSubprogram; + let local_id = self.tcx().hir.as_local_node_id(def_id); + if let Some((id, _, _)) = *self.sess().entry_fn.borrow() { + if local_id == Some(id) { + flags |= DIFlags::FlagMainSubprogram; + } } - } - if cx.layout_of(sig.output()).abi.is_uninhabited() { - flags |= DIFlags::FlagNoReturn; - } + if self.layout_of(sig.output()).abi.is_uninhabited() { + flags |= DIFlags::FlagNoReturn; + } - let fn_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateFunction( - DIB(cx), - containing_scope, - function_name.as_ptr(), - linkage_name.as_ptr(), - file_metadata, - loc.line as c_uint, - function_type_metadata, - is_local_to_unit, - true, - scope_line as c_uint, - flags, - cx.sess().opts.optimize != config::OptLevel::No, - llfn, - template_parameters, - None) - }; + let fn_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateFunction( + DIB(self), + containing_scope, + function_name.as_ptr(), + linkage_name.as_ptr(), + file_metadata, + loc.line as c_uint, + function_type_metadata, + is_local_to_unit, + true, + scope_line as c_uint, + flags, + self.sess().opts.optimize != config::OptLevel::No, + llfn, + template_parameters, + None) + }; - // Initialize fn debug context (including scope map and namespace map) - let fn_debug_context = FunctionDebugContextData { - fn_metadata, - source_locations_enabled: Cell::new(false), - defining_crate: def_id.krate, - }; + // Initialize fn debug context (including scope map and namespace map) + let fn_debug_context = FunctionDebugContextData { + fn_metadata, + source_locations_enabled: Cell::new(false), + defining_crate: def_id.krate, + }; - return FunctionDebugContext::RegularContext(fn_debug_context); + return FunctionDebugContext::RegularContext(fn_debug_context); - fn get_function_signature( - cx: &CodegenCx<'ll, 'tcx>, - sig: ty::FnSig<'tcx>, - ) -> &'ll DIArray { - if cx.sess().opts.debuginfo == DebugInfo::Limited { - return create_DIArray(DIB(cx), &[]); - } + fn get_function_signature<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + sig: ty::FnSig<'tcx>, + ) -> &'ll DIArray { + if cx.sess().opts.debuginfo == DebugInfo::Limited { + return create_DIArray(DIB(cx), &[]); + } - let mut signature = Vec::with_capacity(sig.inputs().len() + 1); + let mut signature = Vec::with_capacity(sig.inputs().len() + 1); - // Return type -- llvm::DIBuilder wants this at index 0 - signature.push(match sig.output().sty { - 
ty::Tuple(ref tys) if tys.is_empty() => None, - _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) - }); + // Return type -- llvm::DIBuilder wants this at index 0 + signature.push(match sig.output().sty { + ty::Tuple(ref tys) if tys.is_empty() => None, + _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) + }); - let inputs = if sig.abi == Abi::RustCall { - &sig.inputs()[..sig.inputs().len() - 1] - } else { - sig.inputs() - }; + let inputs = if sig.abi == Abi::RustCall { + &sig.inputs()[..sig.inputs().len() - 1] + } else { + sig.inputs() + }; - // Arguments types - if cx.sess().target.target.options.is_like_msvc { - // FIXME(#42800): - // There is a bug in MSDIA that leads to a crash when it encounters - // a fixed-size array of `u8` or something zero-sized in a - // function-type (see #40477). - // As a workaround, we replace those fixed-size arrays with a - // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would - // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, - // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. - // This transformed type is wrong, but these function types are - // already inaccurate due to ABI adjustments (see #42800). - signature.extend(inputs.iter().map(|&t| { - let t = match t.sty { - ty::Array(ct, _) - if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { - cx.tcx.mk_imm_ptr(ct) - } - _ => t - }; - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); - } else { - signature.extend(inputs.iter().map(|t| { - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); - } + // Arguments types + if cx.sess().target.target.options.is_like_msvc { + // FIXME(#42800): + // There is a bug in MSDIA that leads to a crash when it encounters + // a fixed-size array of `u8` or something zero-sized in a + // function-type (see #40477). + // As a workaround, we replace those fixed-size arrays with a + // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would + // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, + // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. + // This transformed type is wrong, but these function types are + // already inaccurate due to ABI adjustments (see #42800). 
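The MSDIA workaround described in the comment above (and implemented by the `signature.extend` call that follows) rewrites only the debuginfo signature, never the actual ABI. Pulled out as a standalone mapping for readability; the helper name is hypothetical, and the match body is lifted from the added code below.

```rust
use rustc::ty::{self, Ty};
use rustc::ty::layout::LayoutOf;
use context::CodegenCx;

// Debuginfo-only parameter type rewrite for MSVC targets: fixed-size `u8`
// arrays and zero-sized arrays are described as raw pointers, so e.g.
// `fn foo(a: u8, b: [u8; 4])` is recorded as `fn foo(a: u8, b: *const u8)`
// and `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
fn msvc_debuginfo_param_ty<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> Ty<'tcx> {
    match t.sty {
        ty::Array(ct, _) if ct == cx.tcx.types.u8 || cx.layout_of(ct).is_zst() => {
            cx.tcx.mk_imm_ptr(ct)
        }
        _ => t,
    }
}
```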
+ signature.extend(inputs.iter().map(|&t| { + let t = match t.sty { + ty::Array(ct, _) + if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { + cx.tcx.mk_imm_ptr(ct) + } + _ => t + }; + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } else { + signature.extend(inputs.iter().map(|t| { + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } - if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { - if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { - signature.extend( - args.iter().map(|argument_type| { - Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) - }) - ); + if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { + if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { + signature.extend( + args.iter().map(|argument_type| { + Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) + }) + ); + } } + + create_DIArray(DIB(cx), &signature[..]) } - create_DIArray(DIB(cx), &signature[..]) - } + fn get_template_parameters<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + generics: &ty::Generics, + substs: &Substs<'tcx>, + file_metadata: &'ll DIFile, + name_to_append_suffix_to: &mut String, + ) -> &'ll DIArray { + if substs.types().next().is_none() { + return create_DIArray(DIB(cx), &[]); + } - fn get_template_parameters( - cx: &CodegenCx<'ll, 'tcx>, - generics: &ty::Generics, - substs: &Substs<'tcx>, - file_metadata: &'ll DIFile, - name_to_append_suffix_to: &mut String, - ) -> &'ll DIArray { - if substs.types().next().is_none() { - return create_DIArray(DIB(cx), &[]); - } + name_to_append_suffix_to.push('<'); + for (i, actual_type) in substs.types().enumerate() { + if i != 0 { + name_to_append_suffix_to.push_str(","); + } - name_to_append_suffix_to.push('<'); - for (i, actual_type) in substs.types().enumerate() { - if i != 0 { - name_to_append_suffix_to.push_str(","); + let actual_type = + cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); + // Add actual type name to <...> clause of function name + let actual_type_name = compute_debuginfo_type_name(cx, + actual_type, + true); + name_to_append_suffix_to.push_str(&actual_type_name[..]); } + name_to_append_suffix_to.push('>'); + + // Again, only create type information if full debuginfo is enabled + let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { + let names = get_parameter_names(cx, generics); + substs.iter().zip(names).filter_map(|(kind, name)| { + if let UnpackedKind::Type(ty) = kind.unpack() { + let actual_type = + cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); + let actual_type_metadata = + type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); + let name = SmallCStr::new(&name.as_str()); + Some(unsafe { + Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(cx), + None, + name.as_ptr(), + actual_type_metadata, + file_metadata, + 0, + 0, + )) + }) + } else { + None + } + }).collect() + } else { + vec![] + }; + + return create_DIArray(DIB(cx), &template_params[..]); + } - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); - // Add actual type name to <...> clause of function name - let actual_type_name = compute_debuginfo_type_name(cx, - actual_type, - true); - name_to_append_suffix_to.push_str(&actual_type_name[..]); + fn get_parameter_names(cx: &CodegenCx, + generics: &ty::Generics) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_parameter_names(cx, cx.tcx.generics_of(def_id)) + }); + 
names.extend(generics.params.iter().map(|param| param.name)); + names } - name_to_append_suffix_to.push('>'); - - // Again, only create type information if full debuginfo is enabled - let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { - let names = get_parameter_names(cx, generics); - substs.iter().zip(names).filter_map(|(kind, name)| { - if let UnpackedKind::Type(ty) = kind.unpack() { - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); - let actual_type_metadata = - type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); - let name = SmallCStr::new(&name.as_str()); - Some(unsafe { - Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( - DIB(cx), - None, - name.as_ptr(), - actual_type_metadata, - file_metadata, - 0, - 0, - )) - }) + + fn get_containing_scope<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + instance: Instance<'tcx>, + ) -> &'ll DIScope { + // First, let's see if this is a method within an inherent impl. Because + // if yes, we want to make the result subroutine DIE a child of the + // subroutine's self-type. + let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { + // If the method does *not* belong to a trait, proceed + if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { + let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( + instance.substs, + ty::ParamEnv::reveal_all(), + &cx.tcx.type_of(impl_def_id), + ); + + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g. `<*mut T>::null`). + match impl_self_ty.sty { + ty::Adt(def, ..) if !def.is_box() => { + Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + } + _ => None + } } else { + // For trait method impls we still use the "parallel namespace" + // strategy None } - }).collect() - } else { - vec![] - }; + }); + + self_type.unwrap_or_else(|| { + namespace::item_namespace(cx, DefId { + krate: instance.def_id().krate, + index: cx.tcx + .def_key(instance.def_id()) + .parent + .expect("get_containing_scope: missing parent?") + }) + }) + } + } - create_DIArray(DIB(cx), &template_params[..]) + fn create_vtable_metadata( + &self, + ty: Ty<'tcx>, + vtable: Self::Value, + ) { + metadata::create_vtable_metadata(self, ty, vtable) } - fn get_parameter_names(cx: &CodegenCx, - generics: &ty::Generics) - -> Vec { - let mut names = generics.parent.map_or(vec![], |def_id| { - get_parameter_names(cx, cx.tcx.generics_of(def_id)) - }); - names.extend(generics.params.iter().map(|param| param.name)); - names + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, + ) -> IndexVec> { + create_scope_map::create_mir_scopes(self, mir, debug_context) } - fn get_containing_scope( - cx: &CodegenCx<'ll, 'tcx>, - instance: Instance<'tcx>, - ) -> &'ll DIScope { - // First, let's see if this is a method within an inherent impl. Because - // if yes, we want to make the result subroutine DIE a child of the - // subroutine's self-type. - let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { - // If the method does *not* belong to a trait, proceed - if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { - let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( - instance.substs, - ty::ParamEnv::reveal_all(), - &cx.tcx.type_of(impl_def_id), - ); - - // Only "class" methods are generally understood by LLVM, - // so avoid methods on other types (e.g. `<*mut T>::null`). - match impl_self_ty.sty { - ty::Adt(def, ..) 
if !def.is_box() => { - Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) - } - _ => None - } - } else { - // For trait method impls we still use the "parallel namespace" - // strategy - None - } - }); - - self_type.unwrap_or_else(|| { - namespace::item_namespace(cx, DefId { - krate: instance.def_id().krate, - index: cx.tcx - .def_key(instance.def_id()) - .parent - .expect("get_containing_scope: missing parent?") - }) - }) + fn extend_scope_to_file( + &self, + scope_metadata: &'ll DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, + ) -> &'ll DILexicalBlock { + metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate) + } + + fn debuginfo_finalize(&self) { + finalize(self) } -} -pub fn declare_local( - bx: &Builder<'a, 'll, 'tcx>, - dbg_context: &FunctionDebugContext<'ll>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: &'ll DIScope, - variable_access: VariableAccess<'_, 'll>, - variable_kind: VariableKind, - span: Span, -) { - assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); - let cx = bx.cx; - - let file = span_start(cx, span).file; - let file_metadata = file_metadata(cx, - &file.name, - dbg_context.get_ref(span).defining_crate); - - let loc = span_start(cx, span); - let type_metadata = type_metadata(cx, variable_type, span); - - let (argument_index, dwarf_tag) = match variable_kind { - ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), - LocalVariable => (0, DW_TAG_auto_variable) - }; - let align = cx.align_of(variable_type); - - let name = SmallCStr::new(&variable_name.as_str()); - match (variable_access, &[][..]) { - (DirectVariable { alloca }, address_operations) | - (IndirectVariable {alloca, address_operations}, _) => { - let metadata = unsafe { - llvm::LLVMRustDIBuilderCreateVariable( - DIB(cx), - dwarf_tag, - scope_metadata, - name.as_ptr(), - file_metadata, - loc.line as c_uint, - type_metadata, - cx.sess().opts.optimize != config::OptLevel::No, - DIFlags::FlagZero, - argument_index, - align.abi() as u32, - ) - }; - source_loc::set_debug_location(bx, - InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); - unsafe { - let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder); - let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( - DIB(cx), - alloca, - metadata, - address_operations.as_ptr(), - address_operations.len() as c_uint, - debug_loc, - bx.llbb()); - - llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr); - } - source_loc::set_debug_location(bx, UnknownLocation); + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4] { + unsafe { + [llvm::LLVMRustDIBuilderCreateOpDeref(), + llvm::LLVMRustDIBuilderCreateOpPlusUconst(), + byte_offset_of_var_in_env as i64, + llvm::LLVMRustDIBuilderCreateOpDeref()] } } } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 60ebcb888166ff96bb0193f05edf153b7551d9c5..c6772e8c98e367d65b05dfbc17be1f717e78448e 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -12,11 +12,12 @@ use super::utils::{debug_context, span_start}; use super::metadata::UNKNOWN_COLUMN_NUMBER; -use super::FunctionDebugContext; +use rustc_codegen_ssa::debuginfo::FunctionDebugContext; use llvm; use llvm::debuginfo::DIScope; use builder::Builder; +use rustc_codegen_ssa::traits::*; use libc::c_uint; use syntax_pos::{Span, Pos}; @@ -24,8 +25,8 @@ /// Sets the current debug 
location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). -pub fn set_source_location( - debug_context: &FunctionDebugContext<'ll>, +pub fn set_source_location( + debug_context: &FunctionDebugContext, bx: &Builder<'_, 'll, '_>, scope: Option<&'ll DIScope>, span: Span, @@ -40,8 +41,8 @@ pub fn set_source_location( }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); - let loc = span_start(bx.cx, span); + debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span)); + let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) } else { UnknownLocation @@ -49,18 +50,6 @@ pub fn set_source_location( set_debug_location(bx, dbg_loc); } -/// Enables emitting source locations for the given functions. -/// -/// Since we don't want source locations to be emitted for the function prelude, -/// they are disabled when beginning to codegen a new function. This functions -/// switches source location emitting on and must therefore be called before the -/// first real statement/expression of the function is codegened. -pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext<'ll>) { - if let FunctionDebugContext::RegularContext(ref data) = *dbg_context { - data.source_locations_enabled.set(true); - } -} - #[derive(Copy, Clone, PartialEq)] pub enum InternalDebugLocation<'ll> { @@ -78,13 +67,16 @@ pub fn new(scope: &'ll DIScope, line: usize, col: usize) -> Self { } } -pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDebugLocation<'ll>) { +pub fn set_debug_location( + bx: &Builder<'_, 'll, '_>, + debug_location: InternalDebugLocation<'ll> +) { let metadata_node = match debug_location { KnownLocation { scope, line, col } => { // For MSVC, set the column number to zero. // Otherwise, emit it. This mimics clang behaviour. 
// See discussion in https://github.com/rust-lang/rust/issues/42921 - let col_used = if bx.cx.sess().target.target.options.is_like_msvc { + let col_used = if bx.cx().sess().target.target.options.is_like_msvc { UNKNOWN_COLUMN_NUMBER } else { col as c_uint @@ -93,7 +85,7 @@ pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDeb unsafe { Some(llvm::LLVMRustDIBuilderCreateDebugLocation( - debug_context(bx.cx).llcontext, + debug_context(bx.cx()).llcontext, line as c_uint, col_used, scope, diff --git a/src/librustc_codegen_llvm/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs index eb5ae81b2184024ad6191c52580dc348466a87a2..c3a15ccca0a9d0f340dc4e5589cc29c8fb45cd25 100644 --- a/src/librustc_codegen_llvm/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -14,6 +14,7 @@ use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty}; +use rustc_codegen_ssa::traits::*; use rustc::hir; diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs index 19bc4ac39d308dcf76e3f00847e110d8f9a5ffe9..89262beb356db749ee8299184b663b20d4abd945 100644 --- a/src/librustc_codegen_llvm/debuginfo/utils.rs +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -19,6 +19,7 @@ use llvm; use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; use common::{CodegenCx}; +use rustc_codegen_ssa::traits::*; use syntax_pos::{self, Span}; diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index f4aede55ce1a628d4e054b09a78a6547ae114889..c23aab409a9a8d3ba906047ecf0f3abe9a4e1eb2 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -31,22 +31,9 @@ use attributes; use context::CodegenCx; use type_::Type; +use rustc_codegen_ssa::traits::*; use value::Value; - -/// Declare a global value. -/// -/// If there’s a value with the same name already declared, the function will -/// return its Value instead. -pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value { - debug!("declare_global(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { - llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty) - } -} - - /// Declare a function. /// /// If there’s a value with the same name already declared, the function will @@ -108,123 +95,108 @@ fn declare_raw_fn( llfn } +impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> { -/// Declare a C ABI function. -/// -/// Only use this for foreign function ABIs and glue. For Rust functions use -/// `declare_fn` instead. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. -pub fn declare_cfn(cx: &CodegenCx<'ll, '_>, name: &str, fn_type: &'ll Type) -> &'ll Value { - declare_raw_fn(cx, name, llvm::CCallConv, fn_type) -} - - -/// Declare a Rust function. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. 
-pub fn declare_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - sig: PolyFnSig<'tcx>, -) -> &'ll Value { - debug!("declare_rust_fn(name={:?}, sig={:?})", name, sig); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - debug!("declare_rust_fn (after region erasure) sig={:?}", sig); - - let fty = FnType::new(cx, sig, &[]); - let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx)); - - if cx.layout_of(sig.output()).abi.is_uninhabited() { - llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + fn declare_global( + &self, + name: &str, ty: &'ll Type + ) -> &'ll Value { + debug!("declare_global(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { + llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty) + } } - if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { - attributes::unwind(llfn, false); + fn declare_cfn( + &self, + name: &str, + fn_type: &'ll Type + ) -> &'ll Value { + declare_raw_fn(self, name, llvm::CCallConv, fn_type) } - fty.apply_attrs_llfn(llfn); + fn declare_fn( + &self, + name: &str, + sig: PolyFnSig<'tcx>, + ) -> &'ll Value { + debug!("declare_rust_fn(name={:?}, sig={:?})", name, sig); + let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + debug!("declare_rust_fn (after region erasure) sig={:?}", sig); - llfn -} + let fty = FnType::new(self, sig, &[]); + let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self)); + if self.layout_of(sig.output()).abi.is_uninhabited() { + llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + } -/// Declare a global with an intention to define it. -/// -/// Use this function when you intend to define a global. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> { - if get_defined_value(cx, name).is_some() { - None - } else { - Some(declare_global(cx, name, ty)) + if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { + attributes::unwind(llfn, false); + } + + fty.apply_attrs_llfn(llfn); + + llfn } -} -/// Declare a private global -/// -/// Use this function when you intend to define a global without a name. -pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty) + fn define_global( + &self, + name: &str, + ty: &'ll Type + ) -> Option<&'ll Value> { + if self.get_defined_value(name).is_some() { + None + } else { + Some(self.declare_global(name, ty)) + } } -} -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_sig: PolyFnSig<'tcx>, -) -> &'ll Value { - if get_defined_value(cx, name).is_some() { - cx.sess().fatal(&format!("symbol `{}` already defined", name)) - } else { - declare_fn(cx, name, fn_sig) + fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) + } } -} -/// Declare a Rust function with an intention to define it. 
-/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_internal_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_sig: PolyFnSig<'tcx>, -) -> &'ll Value { - let llfn = define_fn(cx, name, fn_sig); - unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; - llfn -} + fn define_fn( + &self, + name: &str, + fn_sig: PolyFnSig<'tcx>, + ) -> &'ll Value { + if self.get_defined_value(name).is_some() { + self.sess().fatal(&format!("symbol `{}` already defined", name)) + } else { + self.declare_fn(name, fn_sig) + } + } + fn define_internal_fn( + &self, + name: &str, + fn_sig: PolyFnSig<'tcx>, + ) -> &'ll Value { + let llfn = self.define_fn(name, fn_sig); + unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; + llfn + } -/// Get declared value by name. -pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { - debug!("get_declared_value(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) } -} + fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { + debug!("get_declared_value(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { llvm::LLVMRustGetNamedValue(self.llmod, namebuf.as_ptr()) } + } -/// Get defined or externally defined (AvailableExternally linkage) value by -/// name. -pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { - get_declared_value(cx, name).and_then(|val|{ - let declaration = unsafe { - llvm::LLVMIsDeclaration(val) != 0 - }; - if !declaration { - Some(val) - } else { - None - } - }) + fn get_defined_value(&self, name: &str) -> Option<&'ll Value> { + self.get_declared_value(name).and_then(|val|{ + let declaration = unsafe { + llvm::LLVMIsDeclaration(val) != 0 + }; + if !declaration { + Some(val) + } else { + None + } + }) + } } diff --git a/src/librustc_codegen_llvm/diagnostics.rs b/src/librustc_codegen_llvm/diagnostics.rs index 5721938c9c0a75a9899b34c1551f28ed176cf85e..94776f17c7989074aea7cf6b89a75776788d76dd 100644 --- a/src/librustc_codegen_llvm/diagnostics.rs +++ b/src/librustc_codegen_llvm/diagnostics.rs @@ -47,37 +47,4 @@ fn main() { ``` "##, -E0668: r##" -Malformed inline assembly rejected by LLVM. - -LLVM checks the validity of the constraints and the assembly string passed to -it. This error implies that LLVM seems something wrong with the inline -assembly call. - -In particular, it can happen if you forgot the closing bracket of a register -constraint (see issue #51430): -```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) -#![feature(asm)] - -fn main() { - let rax: u64; - unsafe { - asm!("" :"={rax"(rax)); - println!("Accumulator is: {}", rax); - } -} -``` -"##, - -E0669: r##" -Cannot convert inline assembly operand to a single LLVM value. - -This error usually happens when trying to pass in a value to an input inline -assembly operand that is actually a pair of values. In particular, this can -happen when trying to pass in a slice, for instance a `&str`. In Rust, these -values are represented internally as a pair of values, the pointer and its -length. When passed as an input operand, this pair of values can not be -coerced into a register and thus we must fail with an error. 
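For reference, since the deleted E0669 text has no code sample of its own: under the old `asm!` syntax the error is hit as soon as a fat value such as `&str` is used as a register input, because its (pointer, length) pair cannot be squeezed into a single LLVM value. Illustrative only; this snippet is not part of the patch.

```rust
#![feature(asm)]

fn main() {
    let s: &str = "hello";
    unsafe {
        // `&str` is a (pointer, length) pair internally, so it cannot be
        // coerced into the single register this input constraint asks for;
        // codegen rejects it with E0669.
        asm!("" : : "r"(s));
    }
}
```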
-"##, - } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 8f79d49b3e2b4c989625e7c58615c4cefec5a286..b2f1f933da4db645dc6dd88f633197e11193d64d 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -12,25 +12,28 @@ use attributes; use intrinsics::{self, Intrinsic}; -use llvm::{self, TypeKind}; +use llvm; use llvm_util; use abi::{Abi, FnType, LlvmType, PassMode}; -use mir::place::PlaceRef; -use mir::operand::{OperandRef, OperandValue}; -use base::*; -use common::*; -use declare; -use glue; +use rustc_codegen_ssa::MemFlags; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; +use rustc_codegen_ssa::glue; +use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types}; +use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; -use rustc::ty::layout::LayoutOf; +use rustc::ty::layout::{LayoutOf, HasTyCtxt}; +use rustc_codegen_ssa::common::TypeKind; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; use builder::Builder; use value::Value; +use rustc_codegen_ssa::traits::*; + use rustc::session::Session; use syntax_pos::Span; @@ -84,677 +87,740 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu Some(cx.get_intrinsic(&llvm_name)) } -/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, -/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, -/// add them to librustc_codegen_llvm/context.rs -pub fn codegen_intrinsic_call( - bx: &Builder<'a, 'll, 'tcx>, - callee_ty: Ty<'tcx>, - fn_ty: &FnType<'tcx, Ty<'tcx>>, - args: &[OperandRef<'ll, 'tcx>], - llresult: &'ll Value, - span: Span, -) { - let cx = bx.cx; - let tcx = cx.tcx; +impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { + fn codegen_intrinsic_call( + &mut self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, &'ll Value>], + llresult: &'ll Value, + span: Span, + ) { + let tcx = self.cx().tcx; - let (def_id, substs) = match callee_ty.sty { - ty::FnDef(def_id, substs) => (def_id, substs), - _ => bug!("expected fn item type, found {}", callee_ty) - }; + let (def_id, substs) = match callee_ty.sty { + ty::FnDef(def_id, substs) => (def_id, substs), + _ => bug!("expected fn item type, found {}", callee_ty) + }; - let sig = callee_ty.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - let arg_tys = sig.inputs(); - let ret_ty = sig.output(); - let name = &*tcx.item_name(def_id).as_str(); - - let llret_ty = cx.layout_of(ret_ty).llvm_type(cx); - let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); - - let simple = get_simple_intrinsic(cx, name); - let llval = match name { - _ if simple.is_some() => { - bx.call(simple.unwrap(), - &args.iter().map(|arg| arg.immediate()).collect::>(), - None) - } - "unreachable" => { - return; - }, - "likely" => { - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None) - } - "unlikely" => { - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None) - } - "try" => { - try_intrinsic(bx, cx, - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - llresult); - return; - } - "breakpoint" => { - let llfn = cx.get_intrinsic(&("llvm.debugtrap")); - bx.call(llfn, &[], None) - } - 
"size_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.size_of(tp_ty).bytes()) - } - "size_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (llsize, _) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llsize - } else { - C_usize(cx, cx.size_of(tp_ty).bytes()) + let sig = callee_ty.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let arg_tys = sig.inputs(); + let ret_ty = sig.output(); + let name = &*tcx.item_name(def_id).as_str(); + + let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx()); + let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); + + let simple = get_simple_intrinsic(self.cx(), name); + let llval = match name { + _ if simple.is_some() => { + self.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) } - } - "min_align_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).abi()) - } - "min_align_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (_, llalign) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llalign - } else { - C_usize(cx, cx.align_of(tp_ty).abi()) + "unreachable" => { + return; + }, + "likely" => { + let expect = self.cx().get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), self.cx().const_bool(true)], None) } - } - "pref_align_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).pref()) - } - "type_name" => { - let tp_ty = substs.type_at(0); - let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - C_str_slice(cx, ty_name) - } - "type_id" => { - C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0))) - } - "init" => { - let ty = substs.type_at(0); - if !cx.layout_of(ty).is_zst() { - // Just zero out the stack slot. - // If we store a zero constant, LLVM will drown in vreg allocation for large data - // structures, and the generated code will be awful. (A telltale sign of this is - // large quantities of `mov [byte ptr foo],0` in the generated code.) 
- memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1)); + "unlikely" => { + let expect = self.cx().get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), self.cx().const_bool(false)], None) } - return; - } - // Effectively no-ops - "uninit" | "forget" => { - return; - } - "needs_drop" => { - let tp_ty = substs.type_at(0); - - C_bool(cx, bx.cx.type_needs_drop(tp_ty)) - } - "offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.inbounds_gep(ptr, &[offset]) - } - "arith_offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.gep(ptr, &[offset]) - } - - "copy_nonoverlapping" => { - copy_intrinsic(bx, false, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "copy" => { - copy_intrinsic(bx, true, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "write_bytes" => { - memset_intrinsic(bx, false, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - - "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(bx, false, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_copy_memory" => { - copy_intrinsic(bx, true, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_set_memory" => { - memset_intrinsic(bx, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_load" | "unaligned_volatile_load" => { - let tp_ty = substs.type_at(0); - let mut ptr = args[0].immediate(); - if let PassMode::Cast(ty) = fn_ty.ret.mode { - ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to()); + "try" => { + try_intrinsic(self, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult); + return; } - let load = bx.volatile_load(ptr); - let align = if name == "unaligned_volatile_load" { - 1 - } else { - cx.align_of(tp_ty).abi() as u32 - }; - unsafe { - llvm::LLVMSetAlignment(load, align); + "breakpoint" => { + let llfn = self.cx().get_intrinsic(&("llvm.debugtrap")); + self.call(llfn, &[], None) } - to_immediate(bx, load, cx.layout_of(tp_ty)) - }, - "volatile_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.volatile_store(bx, dst); - return; - }, - "unaligned_volatile_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.unaligned_volatile_store(bx, dst); - return; - }, - "prefetch_read_data" | "prefetch_write_data" | - "prefetch_read_instruction" | "prefetch_write_instruction" => { - let expect = cx.get_intrinsic(&("llvm.prefetch")); - let (rw, cache_type) = match name { - "prefetch_read_data" => (0, 1), - "prefetch_write_data" => (1, 1), - "prefetch_read_instruction" => (0, 0), - "prefetch_write_instruction" => (1, 0), - _ => bug!() - }; - bx.call(expect, &[ - args[0].immediate(), - C_i32(cx, rw), - args[1].immediate(), - C_i32(cx, cache_type) - ], None) - }, - "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | - "bitreverse" | "add_with_overflow" | "sub_with_overflow" | - "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | - "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" | - "rotate_left" | "rotate_right" => { - let ty = arg_tys[0]; - match int_type_width_signed(ty, cx) { - Some((width, signed)) => - match name { - "ctlz" | "cttz" => { - let y = C_bool(bx.cx, false); - let llfn = 
cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - bx.call(llfn, &[args[0].immediate(), y], None) - } - "ctlz_nonzero" | "cttz_nonzero" => { - let y = C_bool(bx.cx, true); - let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); - let llfn = cx.get_intrinsic(llvm_name); - bx.call(llfn, &[args[0].immediate(), y], None) - } - "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &[args[0].immediate()], None), - "bswap" => { - if width == 8 { - args[0].immediate() // byte swap a u8/i8 is just a no-op - } else { - bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &[args[0].immediate()], None) - } - } - "bitreverse" => { - bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), - &[args[0].immediate()], None) - } - "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { - let intrinsic = format!("llvm.{}{}.with.overflow.i{}", - if signed { 's' } else { 'u' }, - &name[..3], width); - let llfn = bx.cx.get_intrinsic(&intrinsic); - - // Convert `i1` to a `bool`, and write it to the out parameter - let pair = bx.call(llfn, &[ - args[0].immediate(), - args[1].immediate() - ], None); - let val = bx.extract_value(pair, 0); - let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx)); - - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(overflow, dest.llval, dest.align); - - return; - }, - "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()), - "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()), - "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()), - "exact_div" => - if signed { - bx.exactsdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.exactudiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_div" => - if signed { - bx.sdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.udiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_rem" => - if signed { - bx.srem(args[0].immediate(), args[1].immediate()) - } else { - bx.urem(args[0].immediate(), args[1].immediate()) - }, - "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()), - "unchecked_shr" => - if signed { - bx.ashr(args[0].immediate(), args[1].immediate()) - } else { - bx.lshr(args[0].immediate(), args[1].immediate()) - }, - "rotate_left" | "rotate_right" => { - let is_left = name == "rotate_left"; - let val = args[0].immediate(); - let raw_shift = args[1].immediate(); - if llvm_util::get_major_version() >= 7 { - // rotate = funnel shift with first two args the same - let llvm_name = &format!("llvm.fsh{}.i{}", - if is_left { 'l' } else { 'r' }, width); - let llfn = cx.get_intrinsic(llvm_name); - bx.call(llfn, &[val, val, raw_shift], None) - } else { - // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW)) - // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW)) - let width = C_uint(Type::ix(cx, width), width); - let shift = bx.urem(raw_shift, width); - let inv_shift = bx.urem(bx.sub(width, raw_shift), width); - let shift1 = bx.shl(val, if is_left { shift } else { inv_shift }); - let shift2 = bx.lshr(val, if !is_left { shift } else { inv_shift }); - bx.or(shift1, shift2) - } - }, - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, ty)); - return; + "size_of" => { + let tp_ty = substs.type_at(0); + 
self.cx().const_usize(self.cx().size_of(tp_ty).bytes()) + } + "size_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (llsize, _) = + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + llsize + } else { + self.cx().const_usize(self.cx().size_of(tp_ty).bytes()) } } - }, - "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { - let sty = &arg_tys[0].sty; - match float_type_width(sty) { - Some(_width) => - match name { - "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()), - "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()), - "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()), - "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()), - "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()), - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic float type, found `{}`", name, sty)); - return; + "min_align_of" => { + let tp_ty = substs.type_at(0); + self.cx().const_usize(self.cx().align_of(tp_ty).abi()) + } + "min_align_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (_, llalign) = + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + llalign + } else { + self.cx().const_usize(self.cx().align_of(tp_ty).abi()) } } + "pref_align_of" => { + let tp_ty = substs.type_at(0); + self.cx().const_usize(self.cx().align_of(tp_ty).pref()) + } + "type_name" => { + let tp_ty = substs.type_at(0); + let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); + self.cx().const_str_slice(ty_name) + } + "type_id" => { + self.cx().const_u64(self.cx().tcx.type_id_hash(substs.type_at(0))) + } + "init" => { + let ty = substs.type_at(0); + if !self.cx().layout_of(ty).is_zst() { + // Just zero out the stack slot. + // If we store a zero constant, LLVM will drown in vreg allocation for large + // data structures, and the generated code will be awful. (A telltale sign of + // this is large quantities of `mov [byte ptr foo],0` in the generated code.) 
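The comment above (carried over from the removed version) is about zero-initialization of large values: the `init` intrinsic backs `std::mem::zeroed`, and the `memset_intrinsic` call that follows zeroes the return place instead of storing a giant zero constant. A small, self-contained illustration of the case being optimized:

```rust
use std::mem;

// Zero-initializing a large aggregate. Lowering this as a store of a
// `zeroinitializer` constant would make LLVM chew through vreg allocation
// (lots of `mov [byte ptr foo], 0` in the output); lowering it as a memset
// keeps the generated code small, which is what the intrinsic arm below does.
fn zeroed_big() -> [u64; 1024] {
    unsafe { mem::zeroed() }
}

fn main() {
    let big = zeroed_big();
    assert!(big.iter().all(|&w| w == 0));
}
```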
+ memset_intrinsic( + self, + false, + ty, + llresult, + self.cx().const_u8(0), + self.cx().const_usize(1) + ); + } + return; + } + // Effectively no-ops + "uninit" | "forget" => { + return; + } + "needs_drop" => { + let tp_ty = substs.type_at(0); - }, - - "discriminant_value" => { - args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty) - } - - name if name.starts_with("simd_") => { - match generic_simd_intrinsic(bx, name, - callee_ty, - args, - ret_ty, llret_ty, - span) { - Ok(llval) => llval, - Err(()) => return + self.cx().const_bool(self.cx().type_needs_drop(tp_ty)) + } + "offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + self.inbounds_gep(ptr, &[offset]) + } + "arith_offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + self.gep(ptr, &[offset]) } - } - // This requires that atomic intrinsics follow a specific naming pattern: - // "atomic_[_]", and no ordering means SeqCst - name if name.starts_with("atomic_") => { - use llvm::AtomicOrdering::*; - - let split: Vec<&str> = name.split('_').collect(); - - let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; - let (order, failorder) = match split.len() { - 2 => (SequentiallyConsistent, SequentiallyConsistent), - 3 => match split[2] { - "unordered" => (Unordered, Unordered), - "relaxed" => (Monotonic, Monotonic), - "acq" => (Acquire, Acquire), - "rel" => (Release, Monotonic), - "acqrel" => (AcquireRelease, Acquire), - "failrelaxed" if is_cxchg => - (SequentiallyConsistent, Monotonic), - "failacq" if is_cxchg => - (SequentiallyConsistent, Acquire), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - 4 => match (split[2], split[3]) { - ("acq", "failrelaxed") if is_cxchg => - (Acquire, Monotonic), - ("acqrel", "failrelaxed") if is_cxchg => - (AcquireRelease, Monotonic), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - _ => cx.sess().fatal("Atomic intrinsic not in correct format"), - }; - let invalid_monomorphization = |ty| { - span_invalid_monomorphization_error(tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, ty)); - }; + "copy_nonoverlapping" => { + copy_intrinsic(self, false, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()); + return; + } + "copy" => { + copy_intrinsic(self, true, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()); + return; + } + "write_bytes" => { + memset_intrinsic(self, false, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } - match split[1] { - "cxchg" | "cxchgweak" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; - let pair = bx.atomic_cmpxchg( - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - order, - failorder, - weak); - let val = bx.extract_value(pair, 0); - let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx)); - - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(success, dest.llval, dest.align); - return; - } else { - return invalid_monomorphization(ty); - } + "volatile_copy_nonoverlapping_memory" => { + copy_intrinsic(self, false, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } + "volatile_copy_memory" => { + 
copy_intrinsic(self, true, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } + "volatile_set_memory" => { + memset_intrinsic(self, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()); + return; + } + "volatile_load" | "unaligned_volatile_load" => { + let tp_ty = substs.type_at(0); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty) = fn_ty.ret.mode { + ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty.llvm_type(self.cx()))); } - - "load" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let size = cx.size_of(ty); - bx.atomic_load(args[0].immediate(), order, size) - } else { - return invalid_monomorphization(ty); + let load = self.volatile_load(ptr); + let align = if name == "unaligned_volatile_load" { + 1 + } else { + self.cx().align_of(tp_ty).abi() as u32 + }; + unsafe { + llvm::LLVMSetAlignment(load, align); + } + to_immediate(self, load, self.cx().layout_of(tp_ty)) + }, + "volatile_store" => { + let dst = args[0].deref(self.cx()); + args[1].val.volatile_store(self, dst); + return; + }, + "unaligned_volatile_store" => { + let dst = args[0].deref(self.cx()); + args[1].val.unaligned_volatile_store(self, dst); + return; + }, + "prefetch_read_data" | "prefetch_write_data" | + "prefetch_read_instruction" | "prefetch_write_instruction" => { + let expect = self.cx().get_intrinsic(&("llvm.prefetch")); + let (rw, cache_type) = match name { + "prefetch_read_data" => (0, 1), + "prefetch_write_data" => (1, 1), + "prefetch_read_instruction" => (0, 0), + "prefetch_write_instruction" => (1, 0), + _ => bug!() + }; + self.call(expect, &[ + args[0].immediate(), + self.cx().const_i32(rw), + args[1].immediate(), + self.cx().const_i32(cache_type) + ], None) + }, + "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | + "bitreverse" | "add_with_overflow" | "sub_with_overflow" | + "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" | + "rotate_left" | "rotate_right" => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, self.cx()) { + Some((width, signed)) => + match name { + "ctlz" | "cttz" => { + let y = self.cx().const_bool(false); + let llfn = self.cx().get_intrinsic( + &format!("llvm.{}.i{}", name, width), + ); + self.call(llfn, &[args[0].immediate(), y], None) + } + "ctlz_nonzero" | "cttz_nonzero" => { + let y = self.cx().const_bool(true); + let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); + let llfn = self.cx().get_intrinsic(llvm_name); + self.call(llfn, &[args[0].immediate(), y], None) + } + "ctpop" => self.call( + self.cx().get_intrinsic(&format!("llvm.ctpop.i{}", width)), + &[args[0].immediate()], + None + ), + "bswap" => { + if width == 8 { + args[0].immediate() // byte swap a u8/i8 is just a no-op + } else { + self.call( + self.cx().get_intrinsic( + &format!("llvm.bswap.i{}", width), + ), + &[args[0].immediate()], + None, + ) + } + } + "bitreverse" => { + self.call( + self.cx().get_intrinsic( + &format!("llvm.bitreverse.i{}", width), + ), + &[args[0].immediate()], + None, + ) + } + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { + let intrinsic = format!("llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + &name[..3], width); + let llfn = self.cx().get_intrinsic(&intrinsic); + + // Convert `i1` to a `bool`, and write it to the out parameter + let pair = self.call(llfn, &[ + 
args[0].immediate(), + args[1].immediate() + ], None); + let val = self.extract_value(pair, 0); + let overflow = self.extract_value(pair, 1); + let overflow = self.zext(overflow, self.cx().type_bool()); + + let dest = result.project_field(self, 0); + self.store(val, dest.llval, dest.align); + let dest = result.project_field(self, 1); + self.store(overflow, dest.llval, dest.align); + + return; + }, + "overflowing_add" => self.add(args[0].immediate(), args[1].immediate()), + "overflowing_sub" => self.sub(args[0].immediate(), args[1].immediate()), + "overflowing_mul" => self.mul(args[0].immediate(), args[1].immediate()), + "exact_div" => + if signed { + self.exactsdiv(args[0].immediate(), args[1].immediate()) + } else { + self.exactudiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_div" => + if signed { + self.sdiv(args[0].immediate(), args[1].immediate()) + } else { + self.udiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_rem" => + if signed { + self.srem(args[0].immediate(), args[1].immediate()) + } else { + self.urem(args[0].immediate(), args[1].immediate()) + }, + "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()), + "unchecked_shr" => + if signed { + self.ashr(args[0].immediate(), args[1].immediate()) + } else { + self.lshr(args[0].immediate(), args[1].immediate()) + }, + "rotate_left" | "rotate_right" => { + let is_left = name == "rotate_left"; + let val = args[0].immediate(); + let raw_shift = args[1].immediate(); + if llvm_util::get_major_version() >= 7 { + // rotate = funnel shift with first two args the same + let llvm_name = &format!("llvm.fsh{}.i{}", + if is_left { 'l' } else { 'r' }, width); + let llfn = self.cx().get_intrinsic(llvm_name); + self.call(llfn, &[val, val, raw_shift], None) + } else { + // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW)) + // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW)) + let width = self.cx().const_uint( + self.cx().type_ix(width), + width, + ); + let shift = self.urem(raw_shift, width); + let width_minus_raw_shift = self.sub(width, raw_shift); + let inv_shift = self.urem(width_minus_raw_shift, width); + let shift1 = self.shl( + val, + if is_left { shift } else { inv_shift }, + ); + let shift2 = self.lshr( + val, + if !is_left { shift } else { inv_shift }, + ); + self.or(shift1, shift2) + } + }, + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + return; } } - "store" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let size = cx.size_of(ty); - bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size); + }, + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { + let sty = &arg_tys[0].sty; + match float_type_width(sty) { + Some(_width) => + match name { + "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()), + "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()), + "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()), + "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()), + "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic float type, found `{}`", name, sty)); return; - } else { - return 
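// Illustrative sketch, not part of the patch: the pre-LLVM-7 fallback above lowers
// rotate_left/rotate_right to two shifts and an `or`, with both shift amounts reduced
// modulo the bit width so neither shift can reach the width itself. The same formula on
// a plain u32 (wrapping_sub stands in for the wrapping IR `sub` the builder emits):
fn rotate_left_u32(x: u32, raw_shift: u32) -> u32 {
    const BW: u32 = 32;
    let shift = raw_shift % BW;                      // S % BW
    let inv_shift = BW.wrapping_sub(raw_shift) % BW; // (BW - S) % BW
    (x << shift) | (x >> inv_shift)
}

fn main() {
    for &s in &[0u32, 1, 7, 13, 31, 32, 45] {
        assert_eq!(rotate_left_u32(0xdead_beef, s), 0xdead_beef_u32.rotate_left(s));
    }
}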
invalid_monomorphization(ty); } } - "fence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); - return; - } + }, - "singlethreadfence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); - return; - } + "discriminant_value" => { + args[0].deref(self.cx()).codegen_get_discr(self, ret_ty) + } - // These are all AtomicRMW ops - op => { - let atom_op = match op { - "xchg" => llvm::AtomicXchg, - "xadd" => llvm::AtomicAdd, - "xsub" => llvm::AtomicSub, - "and" => llvm::AtomicAnd, - "nand" => llvm::AtomicNand, - "or" => llvm::AtomicOr, - "xor" => llvm::AtomicXor, - "max" => llvm::AtomicMax, - "min" => llvm::AtomicMin, - "umax" => llvm::AtomicUMax, - "umin" => llvm::AtomicUMin, - _ => cx.sess().fatal("unknown atomic operation") - }; - - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) - } else { - return invalid_monomorphization(ty); - } + name if name.starts_with("simd_") => { + match generic_simd_intrinsic(self, name, + callee_ty, + args, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return } } - } - - "nontemporal_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.nontemporal_store(bx, dst); - return; - } + // This requires that atomic intrinsics follow a specific naming pattern: + // "atomic_[_]", and no ordering means SeqCst + name if name.starts_with("atomic_") => { + use rustc_codegen_ssa::common::AtomicOrdering::*; + use rustc_codegen_ssa::common:: + {SynchronizationScope, AtomicRmwBinOp}; + + let split: Vec<&str> = name.split('_').collect(); + + let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; + let (order, failorder) = match split.len() { + 2 => (SequentiallyConsistent, SequentiallyConsistent), + 3 => match split[2] { + "unordered" => (Unordered, Unordered), + "relaxed" => (Monotonic, Monotonic), + "acq" => (Acquire, Acquire), + "rel" => (Release, Monotonic), + "acqrel" => (AcquireRelease, Acquire), + "failrelaxed" if is_cxchg => + (SequentiallyConsistent, Monotonic), + "failacq" if is_cxchg => + (SequentiallyConsistent, Acquire), + _ => self.cx().sess().fatal("unknown ordering in atomic intrinsic") + }, + 4 => match (split[2], split[3]) { + ("acq", "failrelaxed") if is_cxchg => + (Acquire, Monotonic), + ("acqrel", "failrelaxed") if is_cxchg => + (AcquireRelease, Monotonic), + _ => self.cx().sess().fatal("unknown ordering in atomic intrinsic") + }, + _ => self.cx().sess().fatal("Atomic intrinsic not in correct format"), + }; - _ => { - let intr = Intrinsic::find(&name).unwrap_or_else(|| - bug!("unknown intrinsic '{}'", name)); + let invalid_monomorphization = |ty| { + span_invalid_monomorphization_error(tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + }; - fn one(x: Vec) -> T { - assert_eq!(x.len(), 1); - x.into_iter().next().unwrap() - } - fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> { - use intrinsics::Type::*; - match *t { - Void => vec![Type::void(cx)], - Integer(_signed, _width, llvm_width) => { - vec![Type::ix(cx, llvm_width as u64)] + match split[1] { + "cxchg" | "cxchgweak" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, self.cx()).is_some() { + let weak = split[1] == "cxchgweak"; + let pair = self.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak); + let val = self.extract_value(pair, 0); + let success 
= self.extract_value(pair, 1); + let success = self.zext(success, self.cx().type_bool()); + + let dest = result.project_field(self, 0); + self.store(val, dest.llval, dest.align); + let dest = result.project_field(self, 1); + self.store(success, dest.llval, dest.align); + return; + } else { + return invalid_monomorphization(ty); + } } - Float(x) => { - match x { - 32 => vec![Type::f32(cx)], - 64 => vec![Type::f64(cx)], - _ => bug!() + + "load" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, self.cx()).is_some() { + let size = self.cx().size_of(ty); + self.atomic_load(args[0].immediate(), order, size) + } else { + return invalid_monomorphization(ty); } } - Pointer(ref t, ref llvm_elem, _const) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![elem.ptr_to()] + + "store" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, self.cx()).is_some() { + let size = self.cx().size_of(ty); + self.atomic_store( + args[1].immediate(), + args[0].immediate(), + order, + size + ); + return; + } else { + return invalid_monomorphization(ty); + } } - Vector(ref t, ref llvm_elem, length) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![Type::vector(elem, length as u64)] + + "fence" => { + self.atomic_fence(order, SynchronizationScope::CrossThread); + return; } - Aggregate(false, ref contents) => { - let elems = contents.iter() - .map(|t| one(ty_to_type(cx, t))) - .collect::>(); - vec![Type::struct_(cx, &elems, false)] + + "singlethreadfence" => { + self.atomic_fence(order, SynchronizationScope::SingleThread); + return; } - Aggregate(true, ref contents) => { - contents.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect() + + // These are all AtomicRMW ops + op => { + let atom_op = match op { + "xchg" => AtomicRmwBinOp::AtomicXchg, + "xadd" => AtomicRmwBinOp::AtomicAdd, + "xsub" => AtomicRmwBinOp::AtomicSub, + "and" => AtomicRmwBinOp::AtomicAnd, + "nand" => AtomicRmwBinOp::AtomicNand, + "or" => AtomicRmwBinOp::AtomicOr, + "xor" => AtomicRmwBinOp::AtomicXor, + "max" => AtomicRmwBinOp::AtomicMax, + "min" => AtomicRmwBinOp::AtomicMin, + "umax" => AtomicRmwBinOp::AtomicUMax, + "umin" => AtomicRmwBinOp::AtomicUMin, + _ => self.cx().sess().fatal("unknown atomic operation") + }; + + let ty = substs.type_at(0); + if int_type_width_signed(ty, self.cx()).is_some() { + self.atomic_rmw( + atom_op, + args[0].immediate(), + args[1].immediate(), + order + ) + } else { + return invalid_monomorphization(ty); + } } } } - // This allows an argument list like `foo, (bar, baz), - // qux` to be converted into `foo, bar, baz, qux`, integer - // arguments to be truncated as needed and pointers to be - // cast. - fn modify_as_needed( - bx: &Builder<'a, 'll, 'tcx>, - t: &intrinsics::Type, - arg: &OperandRef<'ll, 'tcx>, - ) -> Vec<&'ll Value> { - match *t { - intrinsics::Type::Aggregate(true, ref contents) => { - // We found a tuple that needs squishing! So - // run over the tuple and load each field. - // - // This assumes the type is "simple", i.e. no - // destructors, and the contents are SIMD - // etc. 
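// Illustrative sketch, not part of the patch: how the "atomic_<op>[_<ordering>]" naming
// scheme handled above decomposes. The Ordering enum here is a stand-in for
// rustc_codegen_ssa::common::AtomicOrdering; the extra cxchg failure-ordering suffixes
// ("failrelaxed"/"failacq") are not modeled.
#[derive(Debug, PartialEq)]
enum Ordering { Unordered, Relaxed, Acquire, Release, AcqRel, SeqCst }

fn parse_atomic_name(name: &str) -> Option<(&str, Ordering)> {
    let parts: Vec<&str> = name.split('_').collect();
    match parts.as_slice() {
        // No ordering suffix means sequentially consistent.
        ["atomic", op] => Some((*op, Ordering::SeqCst)),
        ["atomic", op, ord] => {
            let ord = match *ord {
                "unordered" => Ordering::Unordered,
                "relaxed" => Ordering::Relaxed,
                "acq" => Ordering::Acquire,
                "rel" => Ordering::Release,
                "acqrel" => Ordering::AcqRel,
                _ => return None,
            };
            Some((*op, ord))
        }
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_atomic_name("atomic_xadd"), Some(("xadd", Ordering::SeqCst)));
    assert_eq!(parse_atomic_name("atomic_load_relaxed"), Some(("load", Ordering::Relaxed)));
}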
- assert!(!bx.cx.type_needs_drop(arg.layout.ty)); - let (ptr, align) = match arg.val { - OperandValue::Ref(ptr, None, align) => (ptr, align), - _ => bug!() - }; - let arg = PlaceRef::new_sized(ptr, arg.layout, align); - (0..contents.len()).map(|i| { - arg.project_field(bx, i).load(bx).immediate() - }).collect() - } - intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); - vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())] - } - intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); - vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))] + "nontemporal_store" => { + let dst = args[0].deref(self.cx()); + args[1].val.nontemporal_store(self, dst); + return; + } + + _ => { + let intr = match Intrinsic::find(&name) { + Some(intr) => intr, + None => bug!("unknown intrinsic '{}'", name), + }; + fn one(x: Vec) -> T { + assert_eq!(x.len(), 1); + x.into_iter().next().unwrap() + } + fn ty_to_type<'ll>( + cx: &CodegenCx<'ll, '_>, + t: &intrinsics::Type + ) -> Vec<&'ll Type> { + use intrinsics::Type::*; + match *t { + Void => vec![cx.type_void()], + Integer(_signed, _width, llvm_width) => { + vec![cx.type_ix( llvm_width as u64)] + } + Float(x) => { + match x { + 32 => vec![cx.type_f32()], + 64 => vec![cx.type_f64()], + _ => bug!() + } + } + Pointer(ref t, ref llvm_elem, _const) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_ptr_to(elem)] + } + Vector(ref t, ref llvm_elem, length) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_vector(elem, length as u64)] + } + Aggregate(false, ref contents) => { + let elems = contents.iter() + .map(|t| one(ty_to_type(cx, t))) + .collect::>(); + vec![cx.type_struct( &elems, false)] + } + Aggregate(true, ref contents) => { + contents.iter() + .flat_map(|t| ty_to_type(cx, t)) + .collect() + } } - intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { - // the LLVM intrinsic uses a smaller integer - // size than the C intrinsic's signature, so - // we have to trim it down here. - vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))] + } + + // This allows an argument list like `foo, (bar, baz), + // qux` to be converted into `foo, bar, baz, qux`, integer + // arguments to be truncated as needed and pointers to be + // cast. + fn modify_as_needed<'ll, 'tcx>( + bx: &mut Builder<'_, 'll, 'tcx>, + t: &intrinsics::Type, + arg: &OperandRef<'tcx, &'ll Value>, + ) -> Vec<&'ll Value> { + match *t { + intrinsics::Type::Aggregate(true, ref contents) => { + // We found a tuple that needs squishing! So + // run over the tuple and load each field. + // + // This assumes the type is "simple", i.e. no + // destructors, and the contents are SIMD + // etc. 
+ assert!(!bx.cx().type_needs_drop(arg.layout.ty)); + let (ptr, align) = match arg.val { + OperandValue::Ref(ptr, None, align) => (ptr, align), + _ => bug!() + }; + let arg = PlaceRef::new_sized(ptr, arg.layout, align); + (0..contents.len()).map(|i| { + let field = arg.project_field(bx, i); + bx.load_operand(field).immediate() + }).collect() + } + intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); + vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))] + } + intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); + vec![ + bx.bitcast(arg.immediate(), + bx.cx().type_vector(llvm_elem, length as u64)) + ] + } + intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { + // the LLVM intrinsic uses a smaller integer + // size than the C intrinsic's signature, so + // we have to trim it down here. + vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))] + } + _ => vec![arg.immediate()], } - _ => vec![arg.immediate()], } - } - let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect::>(); + let inputs = intr.inputs.iter() + .flat_map(|t| ty_to_type(self.cx(), t)) + .collect::>(); - let outputs = one(ty_to_type(cx, &intr.output)); + let outputs = one(ty_to_type(self.cx(), &intr.output)); - let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { - modify_as_needed(bx, t, arg) - }).collect(); - assert_eq!(inputs.len(), llargs.len()); + let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { + modify_as_needed(self, t, arg) + }).collect(); + assert_eq!(inputs.len(), llargs.len()); - let val = match intr.definition { - intrinsics::IntrinsicDef::Named(name) => { - let f = declare::declare_cfn(cx, - name, - Type::func(&inputs, outputs)); - bx.call(f, &llargs, None) - } - }; + let val = match intr.definition { + intrinsics::IntrinsicDef::Named(name) => { + let f = self.cx().declare_cfn( + name, + self.cx().type_func(&inputs, outputs), + ); + self.call(f, &llargs, None) + } + }; - match *intr.output { - intrinsics::Type::Aggregate(flatten, ref elems) => { - // the output is a tuple so we need to munge it properly - assert!(!flatten); + match *intr.output { + intrinsics::Type::Aggregate(flatten, ref elems) => { + // the output is a tuple so we need to munge it properly + assert!(!flatten); - for i in 0..elems.len() { - let dest = result.project_field(bx, i); - let val = bx.extract_value(val, i as u64); - bx.store(val, dest.llval, dest.align); + for i in 0..elems.len() { + let dest = result.project_field(self, i); + let val = self.extract_value(val, i as u64); + self.store(val, dest.llval, dest.align); + } + return; } - return; + _ => val, } - _ => val, } - } - }; + }; - if !fn_ty.ret.is_ignore() { - if let PassMode::Cast(ty) = fn_ty.ret.mode { - let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to()); - bx.store(llval, ptr, result.align); - } else { - OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) - .val.store(bx, result); + if !fn_ty.ret.is_ignore() { + if let PassMode::Cast(ty) = fn_ty.ret.mode { + let ptr_llty = self.cx().type_ptr_to(ty.llvm_type(self.cx())); + let ptr = self.pointercast(result.llval, ptr_llty); + self.store(llval, ptr, result.align); + } else { + OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) + .val.store(self, result); + } } } } fn copy_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, 
allow_overlap: bool, volatile: bool, ty: Ty<'tcx>, dst: &'ll Value, src: &'ll Value, count: &'ll Value, -) -> &'ll Value { - let cx = bx.cx; - let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = align.abi(); - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); - let src_ptr = bx.pointercast(src, Type::i8p(cx)); +) { + let (size, align) = bx.cx().size_and_align_of(ty); + let size = bx.mul(bx.cx().const_usize(size.bytes()), count); + let flags = if volatile { + MemFlags::VOLATILE + } else { + MemFlags::empty() + }; if allow_overlap { - bx.memmove(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile) + bx.memmove(dst, align, src, align, size, flags); } else { - bx.memcpy(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile) + bx.memcpy(dst, align, src, align, size, flags); } } fn memset_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, volatile: bool, ty: Ty<'tcx>, dst: &'ll Value, val: &'ll Value, count: &'ll Value -) -> &'ll Value { - let cx = bx.cx; - let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = C_i32(cx, align.abi() as i32); - let dst = bx.pointercast(dst, Type::i8p(cx)); - call_memset(bx, dst, val, bx.mul(size, count), align, volatile) +) { + let (size, align) = bx.cx().size_and_align_of(ty); + let size = bx.mul(bx.cx().const_usize(size.bytes()), count); + let flags = if volatile { + MemFlags::VOLATILE + } else { + MemFlags::empty() + }; + bx.memset(dst, val, size, align, flags); } fn try_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - if bx.sess().no_landing_pads() { + if bx.cx().sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align); - } else if wants_msvc_seh(bx.sess()) { - codegen_msvc_try(bx, cx, func, data, local_ptr, dest); + bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align); + } else if wants_msvc_seh(bx.cx().sess()) { + codegen_msvc_try(bx, func, data, local_ptr, dest); } else { - codegen_gnu_try(bx, cx, func, data, local_ptr, dest); + codegen_gnu_try(bx, func, data, local_ptr, dest); } } @@ -766,22 +832,19 @@ fn try_intrinsic( // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. 
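// Illustrative sketch, not part of the patch: copy_intrinsic/memset_intrinsic above
// receive `count` in elements, so the byte length handed to memcpy/memmove/memset is
// size_of::<T>() multiplied by the count operand (the volatile flavours only add
// MemFlags::VOLATILE). The same element-to-byte accounting with std::ptr:
use std::mem;

fn byte_len<T>(count: usize) -> usize {
    mem::size_of::<T>() * count
}

fn main() {
    let src = [1u64, 2, 3, 4];
    let mut dst = [0u64; 4];
    assert_eq!(byte_len::<u64>(src.len()), 32);
    // Non-overlapping copy of `count` elements, as "copy_nonoverlapping" lowers to.
    unsafe {
        std::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    }
    assert_eq!(dst, src);
}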
fn codegen_msvc_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; + let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| { + bx.set_personality_fn(bx.cx().eh_personality()); - bx.set_personality_fn(bx.cx.eh_personality()); - - let normal = bx.build_sibling_block("normal"); - let catchswitch = bx.build_sibling_block("catchswitch"); - let catchpad = bx.build_sibling_block("catchpad"); - let caught = bx.build_sibling_block("caught"); + let mut normal = bx.build_sibling_block("normal"); + let mut catchswitch = bx.build_sibling_block("catchswitch"); + let mut catchpad = bx.build_sibling_block("catchpad"); + let mut caught = bx.build_sibling_block("caught"); let func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); @@ -826,34 +889,35 @@ fn codegen_msvc_try( // } // // More information can be found in libstd's seh.rs implementation. - let i64p = Type::i64(cx).ptr_to(); + let i64p = bx.cx().type_ptr_to(bx.cx().type_i64()); let ptr_align = bx.tcx().data_layout.pointer_align; let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); - normal.ret(C_i32(cx, 0)); + normal.ret(bx.cx().const_i32(0)); let cs = catchswitch.catch_switch(None, None, 1); catchswitch.add_handler(cs, catchpad.llbb()); - let tcx = cx.tcx; - let tydesc = match tcx.lang_items().msvc_try_filter() { - Some(did) => ::consts::get_static(cx, did), + let tydesc = match bx.tcx().lang_items().msvc_try_filter() { + Some(did) => bx.cx().get_static(did), None => bug!("msvc_try_filter not defined"), }; - let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]); + let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]); let addr = catchpad.load(slot, ptr_align); let i64_align = bx.tcx().data_layout.i64_align; let arg1 = catchpad.load(addr, i64_align); - let val1 = C_i32(cx, 1); - let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); + let val1 = bx.cx().const_i32(1); + let gep1 = catchpad.inbounds_gep(addr, &[val1]); + let arg2 = catchpad.load(gep1, i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); + let gep2 = catchpad.inbounds_gep(local_ptr, &[val1]); catchpad.store(arg1, local_ptr, i64_align); - catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); - catchpad.catch_ret(tok, caught.llbb()); + catchpad.store(arg2, gep2, i64_align); + catchpad.catch_ret(&funclet, caught.llbb()); - caught.ret(C_i32(cx, 1)); + caught.ret(bx.cx().const_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -875,16 +939,13 @@ fn codegen_msvc_try( // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. fn codegen_gnu_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; - + let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| { // Codegens the shims described above: // // bx: @@ -902,14 +963,14 @@ fn codegen_gnu_try( // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. 
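// Illustrative sketch, not part of the patch: the observable contract of the rust_try
// shims built above (both the SEH catchpad variant and the landing-pad variant) — run
// the closure, return 0 on normal exit, or catch the unwind, hand the payload back
// through an out-parameter, and return 1. conceptual_rust_try is a hypothetical
// stand-in modeled with std::panic::catch_unwind instead of generated IR.
use std::any::Any;
use std::panic::{self, AssertUnwindSafe};

fn conceptual_rust_try<F: FnOnce()>(
    func: F,
    local_ptr: &mut Option<Box<dyn Any + Send>>,
) -> i32 {
    match panic::catch_unwind(AssertUnwindSafe(func)) {
        Ok(()) => 0,
        Err(payload) => {
            // The real shim writes a raw exception pointer through local_ptr.
            *local_ptr = Some(payload);
            1
        }
    }
}

fn main() {
    let mut slot = None;
    assert_eq!(conceptual_rust_try(|| {}, &mut slot), 0);
    assert_eq!(conceptual_rust_try(|| panic!("boom"), &mut slot), 1);
    assert!(slot.is_some());
}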
- let then = bx.build_sibling_block("then"); - let catch = bx.build_sibling_block("catch"); + let mut then = bx.build_sibling_block("then"); + let mut catch = bx.build_sibling_block("catch"); let func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let local_ptr = llvm::get_param(bx.llfn(), 2); bx.invoke(func, &[data], then.llbb(), catch.llbb(), None); - then.ret(C_i32(cx, 0)); + then.ret(bx.cx().const_i32(0)); // Type indicator for the exception being thrown. // @@ -917,13 +978,14 @@ fn codegen_gnu_try( // being thrown. The second value is a "selector" indicating which of // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. - let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false); - let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1); - catch.add_clause(vals, C_null(Type::i8p(cx))); + let lpad_ty = bx.cx().type_struct(&[bx.cx().type_i8p(), bx.cx().type_i32()], false); + let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); + catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p())); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align; - catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align); - catch.ret(C_i32(cx, 1)); + let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p())); + catch.store(ptr, bitcast, ptr_align); + catch.ret(bx.cx().const_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -949,7 +1011,7 @@ fn gen_fn<'ll, 'tcx>( hir::Unsafety::Unsafe, Abi::Rust )); - let llfn = declare::define_internal_fn(cx, name, rust_fn_sig); + let llfn = cx.define_internal_fn(name, rust_fn_sig); attributes::from_fn_attrs(cx, llfn, None); let bx = Builder::new_block(cx, llfn, "entry-block"); codegen(bx); @@ -989,10 +1051,10 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) { } fn generic_simd_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, name: &str, callee_ty: Ty<'tcx>, - args: &[OperandRef<'ll, 'tcx>], + args: &[OperandRef<'tcx, &'ll Value>], ret_ty: Ty<'tcx>, llret_ty: &'ll Type, span: Span @@ -1004,7 +1066,7 @@ fn generic_simd_intrinsic( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bx.sess(), span, + bx.cx().sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); } @@ -1065,7 +1127,7 @@ fn generic_simd_intrinsic( found `{}` with length {}", in_len, in_ty, ret_ty, out_len); - require!(llret_ty.element_type().kind() == TypeKind::Integer, + require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer, "expected return type with integer elements, found `{}` with non-integer `{}`", ret_ty, ret_ty.simd_type(tcx)); @@ -1101,8 +1163,8 @@ fn generic_simd_intrinsic( let indices: Option> = (0..n) .map(|i| { let arg_idx = i; - let val = const_get_elt(vector, i as u64); - match const_to_opt_u128(val, true) { + let val = bx.cx().const_get_elt(vector, i as u64); + match bx.cx().const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is not a constant", arg_idx); None @@ -1112,18 +1174,18 @@ fn generic_simd_intrinsic( arg_idx, total_len); None } - Some(idx) => Some(C_i32(bx.cx, idx as i32)), + Some(idx) => Some(bx.cx().const_i32(idx as i32)), } }) .collect(); let indices = match indices { Some(i) => i, - None => return Ok(C_null(llret_ty)) + None => return Ok(bx.cx().const_null(llret_ty)) }; return 
Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), - C_vector(&indices))) + bx.cx().const_vector(&indices))) } if name == "simd_insert" { @@ -1154,8 +1216,8 @@ fn generic_simd_intrinsic( _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty) } // truncate the mask to a vector of i1s - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, m_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, m_len as u64); let m_i1s = bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1165,9 +1227,9 @@ fn simd_simple_float_intrinsic( in_elem: &::rustc::ty::TyS, in_ty: &::rustc::ty::TyS, in_len: usize, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx>, span: Span, - args: &[OperandRef<'ll, 'tcx>], + args: &[OperandRef<'tcx, &'ll Value>], ) -> Result<&'ll Value, ()> { macro_rules! emit_error { ($msg: tt) => { @@ -1175,7 +1237,7 @@ fn simd_simple_float_intrinsic( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bx.sess(), span, + bx.cx().sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); } @@ -1216,7 +1278,7 @@ fn simd_simple_float_intrinsic( }; let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety); - let intrinsic = bx.cx.get_intrinsic(&llvm_name); + let intrinsic = bx.cx().get_intrinsic(&llvm_name); let c = bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::>(), None); @@ -1287,16 +1349,16 @@ fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize, mut no_pointers: usize) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? let mut elem_ty = match elem_ty.sty { - ty::Int(v) => Type::int_from_ty(cx, v), - ty::Uint(v) => Type::uint_from_ty(cx, v), - ty::Float(v) => Type::float_from_ty(cx, v), + ty::Int(v) => cx.type_int_from_ty( v), + ty::Uint(v) => cx.type_uint_from_ty( v), + ty::Float(v) => cx.type_float_from_ty( v), _ => unreachable!(), }; while no_pointers > 0 { - elem_ty = elem_ty.ptr_to(); + elem_ty = cx.type_ptr_to(elem_ty); no_pointers -= 1; } - Type::vector(elem_ty, vec_len as u64) + cx.type_vector(elem_ty, vec_len as u64) } @@ -1373,29 +1435,32 @@ fn non_ptr(t: ty::Ty) -> ty::Ty { } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = bx.cx().type_i32(); + let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = 
declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty, - llvm_elem_vec_ty], llvm_elem_vec_ty)); + let f = bx.cx().declare_cfn(&llvm_intrinsic, + bx.cx().type_func(&[ + llvm_pointer_vec_ty, + alignment_ty, + mask_ty, + llvm_elem_vec_ty], llvm_elem_vec_ty)); llvm::SetUnnamedAddr(f, false); let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); @@ -1470,30 +1535,30 @@ fn non_ptr(t: ty::Ty) -> ty::Ty { } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = bx.cx().type_i32(); + let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; - let ret_t = Type::void(bx.cx); + let ret_t = bx.cx().type_void(); // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_elem_vec_ty, + let f = bx.cx().declare_cfn(&llvm_intrinsic, + bx.cx().type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1533,7 +1598,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty { // code is generated // * if the accumulator of the fmul isn't 1, incorrect // code is generated - match const_get_real(acc) { + match bx.cx().const_get_real(acc) { None => return_error!("accumulator of {} is not a constant", $name), Some((v, loses_info)) => { if $name.contains("mul") && v != 1.0_f64 { @@ -1549,8 +1614,8 @@ fn non_ptr(t: ty::Ty) -> ty::Ty { } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => C_undef(Type::f32(bx.cx)), - 64 => C_undef(Type::f64(bx.cx)), + 32 => bx.cx().const_undef(bx.cx().type_f32()), + 64 => bx.cx().const_undef(bx.cx().type_f64()), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, @@ -1627,8 +1692,8 @@ fn non_ptr(t: ty::Ty) -> ty::Ty { } // boolean reductions operate on vectors of i1s: - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { @@ -1638,7 +1703,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty { if !$boolean { r } else { - bx.zext(r, Type::bool(bx.cx)) + bx.zext(r, bx.cx().type_bool()) } ) }, diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 5d9bae5412e1a38fbe9db46214311b51355ea81b..f904a928d53e609bb8de34d4e09be181b4b5a8b2 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -37,11 +37,10 @@ 
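// Illustrative sketch, not part of the patch: why the reduction code above insists on an
// identity accumulator (0.0 for the ordered add reductions, 1.0 for the ordered mul
// reductions) before forwarding to the LLVM vector-reduce intrinsics. With the identity
// value, folding the accumulator in first leaves the result unchanged; any other start
// value would be baked into the reduction.
fn reduce_add(acc: f32, v: &[f32]) -> f32 {
    v.iter().fold(acc, |a, &x| a + x)
}

fn main() {
    let v = [1.0f32, 2.0, 3.0, 4.0];
    assert_eq!(reduce_add(0.0, &v), 10.0); // identity accumulator: plain lane sum
    assert_eq!(reduce_add(5.0, &v), 15.0); // non-identity start value shifts the result
}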
#![feature(static_nobundle)] use back::write::create_target_machine; -use rustc::dep_graph::WorkProduct; use syntax_pos::symbol::Symbol; -#[macro_use] extern crate bitflags; extern crate flate2; +#[macro_use] extern crate bitflags; extern crate libc; #[macro_use] extern crate rustc; extern crate jobserver; @@ -56,6 +55,7 @@ extern crate rustc_llvm; extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_codegen_utils; +extern crate rustc_codegen_ssa; extern crate rustc_fs_util; #[macro_use] extern crate log; @@ -67,28 +67,30 @@ extern crate tempfile; extern crate memmap; -use back::bytecode::RLIB_BYTECODE_EXTENSION; - +use rustc_codegen_ssa::traits::*; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; +use rustc_codegen_ssa::CompiledModule; +use errors::{FatalError, Handler}; +use rustc::dep_graph::WorkProduct; +use rustc::util::time_graph::Timeline; +use syntax_pos::symbol::InternedString; +use rustc::mir::mono::Stats; pub use llvm_util::target_features; use std::any::Any; -use std::sync::mpsc; -use rustc_data_structures::sync::Lrc; +use std::sync::{mpsc, Arc}; use rustc::dep_graph::DepGraph; -use rustc::hir::def_id::CrateNum; -use rustc::middle::cstore::MetadataLoader; -use rustc::middle::cstore::{NativeLibrary, CrateSource, LibSource}; -use rustc::middle::lang_items::LangItem; +use rustc::middle::allocator::AllocatorKind; +use rustc::middle::cstore::{EncodedMetadata, MetadataLoader}; use rustc::session::{Session, CompileIncomplete}; use rustc::session::config::{OutputFilenames, OutputType, PrintRequest}; use rustc::ty::{self, TyCtxt}; use rustc::util::time_graph; -use rustc::util::nodemap::{FxHashSet, FxHashMap}; use rustc::util::profiling::ProfileCategory; use rustc_mir::monomorphize; -use rustc_codegen_utils::{CompiledModule, ModuleKind}; +use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_utils::codegen_backend::CodegenBackend; -use rustc_data_structures::svh::Svh; mod diagnostics; @@ -114,22 +116,110 @@ mod back { mod context; mod debuginfo; mod declare; -mod glue; mod intrinsic; -pub mod llvm; + +// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912. 
+#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; } + mod llvm_util; mod metadata; -mod meth; -mod mir; mod mono_item; mod type_; mod type_of; mod value; +#[derive(Clone)] pub struct LlvmCodegenBackend(()); -impl !Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis -impl !Sync for LlvmCodegenBackend {} +impl ExtraBackendMethods for LlvmCodegenBackend { + fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm { + ModuleLlvm::new(sess, mod_name) + } + fn write_metadata<'b, 'gcx>( + &self, + tcx: TyCtxt<'b, 'gcx, 'gcx>, + metadata: &ModuleLlvm + ) -> EncodedMetadata { + base::write_metadata(tcx, metadata) + } + fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { + unsafe { allocator::codegen(tcx, mods, kind) } + } + fn compile_codegen_unit<'a, 'tcx: 'a>( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu_name: InternedString, + ) -> Stats { + base::compile_codegen_unit(tcx, cgu_name) + } + fn target_machine_factory( + &self, + sess: &Session, + find_features: bool + ) -> Arc + Result<&'static mut llvm::TargetMachine, String> + Send + Sync> { + back::write::target_machine_factory(sess, find_features) + } + fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str { + llvm_util::target_cpu(sess) + } +} + +impl WriteBackendMethods for LlvmCodegenBackend { + type Module = ModuleLlvm; + type ModuleBuffer = back::lto::ModuleBuffer; + type Context = llvm::Context; + type TargetMachine = &'static mut llvm::TargetMachine; + type ThinData = back::lto::ThinData; + type ThinBuffer = back::lto::ThinBuffer; + fn print_pass_timings(&self) { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } + fn run_lto( + cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, + timeline: &mut Timeline + ) -> Result<(Vec>, Vec), FatalError> { + back::lto::run(cgcx, modules, cached_modules, timeline) + } + unsafe fn optimize( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: &ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result<(), FatalError> { + back::write::optimize(cgcx, diag_handler, module, config, timeline) + } + unsafe fn optimize_thin( + cgcx: &CodegenContext, + thin: &mut ThinModule, + timeline: &mut Timeline + ) -> Result, FatalError> { + back::lto::optimize_thin_module(thin, cgcx, timeline) + } + unsafe fn codegen( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result { + back::write::codegen(cgcx, diag_handler, module, config, timeline) + } + fn run_lto_pass_manager( + cgcx: &CodegenContext, + module: &ModuleCodegen, + config: &ModuleConfig, + thin: bool + ) { + back::lto::run_pass_manager(cgcx, module, config, thin) + } +} + +unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis +unsafe impl Sync for LlvmCodegenBackend {} impl LlvmCodegenBackend { pub fn new() -> Box { @@ -190,24 +280,24 @@ fn metadata_loader(&self) -> Box { } fn provide(&self, providers: &mut ty::query::Providers) { - rustc_codegen_utils::symbol_export::provide(providers); rustc_codegen_utils::symbol_names::provide(providers); - base::provide_both(providers); + rustc_codegen_ssa::back::symbol_export::provide(providers); + rustc_codegen_ssa::base::provide_both(providers); attributes::provide(providers); } fn provide_extern(&self, providers: &mut ty::query::Providers) { - rustc_codegen_utils::symbol_export::provide_extern(providers); - base::provide_both(providers); + 
rustc_codegen_ssa::back::symbol_export::provide_extern(providers); + rustc_codegen_ssa::base::provide_both(providers); attributes::provide_extern(providers); } - fn codegen_crate<'a, 'tcx>( + fn codegen_crate<'b, 'tcx>( &self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'b, 'tcx, 'tcx>, rx: mpsc::Receiver> ) -> Box { - box base::codegen_crate(tcx, rx) + box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx) } fn join_codegen_and_link( @@ -218,12 +308,13 @@ fn join_codegen_and_link( outputs: &OutputFilenames, ) -> Result<(), CompileIncomplete>{ use rustc::util::common::time; - let (ongoing_codegen, work_products) = - ongoing_codegen.downcast::<::back::write::OngoingCodegen>() + let (codegen_results, work_products) = + ongoing_codegen.downcast:: + >() .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box") .join(sess); if sess.opts.debugging_opts.incremental_info { - back::write::dump_incremental_data(&ongoing_codegen); + rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results); } time(sess, @@ -241,14 +332,14 @@ fn join_codegen_and_link( // This should produce either a finished executable or library. sess.profiler(|p| p.start_activity(ProfileCategory::Linking)); time(sess, "linking", || { - back::link::link_binary(sess, &ongoing_codegen, - outputs, &ongoing_codegen.crate_name.as_str()); + back::link::link_binary(sess, &codegen_results, + outputs, &codegen_results.crate_name.as_str()); }); sess.profiler(|p| p.end_activity(ProfileCategory::Linking)); // Now that we won't touch anything in the incremental compilation directory // any more, we can finalize it (which involves renaming it) - rustc_incremental::finalize_session_directory(sess, ongoing_codegen.crate_hash); + rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash); Ok(()) } @@ -260,57 +351,7 @@ pub fn __rustc_codegen_backend() -> Box { LlvmCodegenBackend::new() } -struct ModuleCodegen { - /// The name of the module. When the crate may be saved between - /// compilations, incremental compilation requires that name be - /// unique amongst **all** crates. Therefore, it should contain - /// something unique to this crate (e.g., a module path) as well - /// as the crate name and disambiguator. - /// We currently generate these names via CodegenUnit::build_cgu_name(). 
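// Illustrative sketch, not part of the patch: join_codegen_and_link above receives the
// ongoing codegen as Box<dyn Any> and recovers the concrete
// rustc_codegen_ssa OngoingCodegen<LlvmCodegenBackend> via `downcast`. The same
// erase-then-downcast pattern on a stand-in type:
use std::any::Any;

struct OngoingWork { crate_name: String }

fn hand_off() -> Box<dyn Any> {
    Box::new(OngoingWork { crate_name: "demo".to_string() })
}

fn main() {
    let erased: Box<dyn Any> = hand_off();
    let work = erased
        .downcast::<OngoingWork>()
        .expect("expected OngoingWork behind the Any");
    assert_eq!(work.crate_name, "demo");
}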
- name: String, - module_llvm: ModuleLlvm, - kind: ModuleKind, -} - -struct CachedModuleCodegen { - name: String, - source: WorkProduct, -} - -impl ModuleCodegen { - fn into_compiled_module(self, - emit_obj: bool, - emit_bc: bool, - emit_bc_compressed: bool, - outputs: &OutputFilenames) -> CompiledModule { - let object = if emit_obj { - Some(outputs.temp_path(OutputType::Object, Some(&self.name))) - } else { - None - }; - let bytecode = if emit_bc { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) - } else { - None - }; - let bytecode_compressed = if emit_bc_compressed { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) - .with_extension(RLIB_BYTECODE_EXTENSION)) - } else { - None - }; - - CompiledModule { - name: self.name.clone(), - kind: self.kind, - object, - bytecode, - bytecode_compressed, - } - } -} - -struct ModuleLlvm { +pub struct ModuleLlvm { llcx: &'static mut llvm::Context, llmod_raw: *const llvm::Module, tm: &'static mut llvm::TargetMachine, @@ -349,35 +390,4 @@ fn drop(&mut self) { } } -struct CodegenResults { - crate_name: Symbol, - modules: Vec, - allocator_module: Option, - metadata_module: CompiledModule, - crate_hash: Svh, - metadata: rustc::middle::cstore::EncodedMetadata, - windows_subsystem: Option, - linker_info: rustc_codegen_utils::linker::LinkerInfo, - crate_info: CrateInfo, -} - -/// Misc info we load from metadata to persist beyond the tcx -struct CrateInfo { - panic_runtime: Option, - compiler_builtins: Option, - profiler_runtime: Option, - sanitizer_runtime: Option, - is_no_builtins: FxHashSet, - native_libraries: FxHashMap>>, - crate_name: FxHashMap, - used_libraries: Lrc>, - link_args: Lrc>, - used_crate_source: FxHashMap>, - used_crates_static: Vec<(CrateNum, LibSource)>, - used_crates_dynamic: Vec<(CrateNum, LibSource)>, - wasm_imports: FxHashMap, - lang_item_to_crate: FxHashMap, - missing_lang_items: FxHashMap>, -} - __build_diagnostic_array! 
{ librustc_codegen_llvm, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 612581c1ac671067cbb4efb219aa93260fed01db..f1a966d7654388c1389e0a5b37c1cb007b25c829 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -19,6 +19,8 @@ use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; +use syntax; +use rustc_codegen_ssa; use super::RustString; @@ -141,6 +143,23 @@ pub enum IntPredicate { IntSLE = 41, } +impl IntPredicate { + pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self { + match intpre { + rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ, + rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE, + rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT, + rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE, + rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT, + rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE, + rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT, + rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE, + rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT, + rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE, + } + } +} + /// LLVMRealPredicate #[derive(Copy, Clone)] #[repr(C)] @@ -163,6 +182,31 @@ pub enum RealPredicate { RealPredicateTrue = 15, } +impl RealPredicate { + pub fn from_generic(realpred: rustc_codegen_ssa::common::RealPredicate) -> Self { + match realpred { + rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => + RealPredicate::RealPredicateFalse, + rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, + rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT, + rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE, + rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT, + rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE, + rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE, + rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD, + rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO, + rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, + rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT, + rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE, + rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT, + rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE, + rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE, + rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => + RealPredicate::RealPredicateTrue + } + } +} + /// LLVMTypeKind #[derive(Copy, Clone, PartialEq, Debug)] #[repr(C)] @@ -186,6 +230,30 @@ pub enum TypeKind { Token = 16, } +impl TypeKind { + pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind { + match self { + TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void, + TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half, + TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float, + TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double, + TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80, + TypeKind::FP128 => 
rustc_codegen_ssa::common::TypeKind::FP128, + TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128, + TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label, + TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer, + TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function, + TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct, + TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array, + TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer, + TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector, + TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata, + TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX, + TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token, + } + } +} + /// LLVMAtomicRmwBinOp #[derive(Copy, Clone)] #[repr(C)] @@ -203,6 +271,24 @@ pub enum AtomicRmwBinOp { AtomicUMin = 10, } +impl AtomicRmwBinOp { + pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self { + match op { + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin + } + } +} + /// LLVMAtomicOrdering #[derive(Copy, Clone)] #[repr(C)] @@ -218,6 +304,23 @@ pub enum AtomicOrdering { SequentiallyConsistent = 7, } +impl AtomicOrdering { + pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self { + match ao { + rustc_codegen_ssa::common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, + rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, + rustc_codegen_ssa::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, + rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, + rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release, + rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => + AtomicOrdering::AcquireRelease, + rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => + AtomicOrdering::SequentiallyConsistent + } + } +} + + /// LLVMRustSynchronizationScope #[derive(Copy, Clone)] #[repr(C)] @@ -229,6 +332,18 @@ pub enum SynchronizationScope { CrossThread, } +impl SynchronizationScope { + pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self { + match sc { + rustc_codegen_ssa::common::SynchronizationScope::Other => SynchronizationScope::Other, + rustc_codegen_ssa::common::SynchronizationScope::SingleThread => + SynchronizationScope::SingleThread, + rustc_codegen_ssa::common::SynchronizationScope::CrossThread => + SynchronizationScope::CrossThread, + } + } +} + /// LLVMRustFileType #[derive(Copy, Clone)] #[repr(C)] @@ -269,6 +384,15 @@ pub enum 
AsmDialect { Intel, } +impl AsmDialect { + pub fn from_generic(asm: syntax::ast::AsmDialect) -> Self { + match asm { + syntax::ast::AsmDialect::Att => AsmDialect::Att, + syntax::ast::AsmDialect::Intel => AsmDialect::Intel + } + } +} + /// LLVMRustCodeGenOptLevel #[derive(Copy, Clone, PartialEq)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs deleted file mode 100644 index 586a490774023f9f000a68b8c63da9eb2b4636c1..0000000000000000000000000000000000000000 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm; -use rustc::mir::interpret::{ErrorHandled, read_target_uint}; -use rustc_mir::const_eval::const_field; -use rustc::hir::def_id::DefId; -use rustc::mir; -use rustc_data_structures::indexed_vec::Idx; -use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType}; -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; -use builder::Builder; -use common::{CodegenCx}; -use common::{C_bytes, C_struct, C_uint_big, C_undef, C_usize}; -use consts; -use type_of::LayoutLlvmExt; -use type_::Type; -use syntax::ast::Mutability; -use syntax::source_map::Span; -use value::Value; - -use super::super::callee; -use super::FunctionCx; - -pub fn scalar_to_llvm( - cx: &CodegenCx<'ll, '_>, - cv: Scalar, - layout: &layout::Scalar, - llty: &'ll Type, -) -> &'ll Value { - let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() }; - match cv { - Scalar::Bits { size: 0, .. 
} => { - assert_eq!(0, layout.value.size(cx).bytes()); - C_undef(Type::ix(cx, 0)) - }, - Scalar::Bits { bits, size } => { - assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = C_uint_big(Type::ix(cx, bitsize), bits); - if layout.value == layout::Pointer { - unsafe { llvm::LLVMConstIntToPtr(llval, llty) } - } else { - consts::bitcast(llval, llty) - } - }, - Scalar::Ptr(ptr) => { - let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id); - let base_addr = match alloc_type { - Some(AllocType::Memory(alloc)) => { - let init = const_alloc_to_llvm(cx, alloc); - if alloc.mutability == Mutability::Mutable { - consts::addr_of_mut(cx, init, alloc.align, None) - } else { - consts::addr_of(cx, init, alloc.align, None) - } - } - Some(AllocType::Function(fn_instance)) => { - callee::get_fn(cx, fn_instance) - } - Some(AllocType::Static(def_id)) => { - assert!(cx.tcx.is_static(def_id).is_some()); - consts::get_static(cx, def_id) - } - None => bug!("missing allocation {:?}", ptr.alloc_id), - }; - let llval = unsafe { llvm::LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(cx)), - &C_usize(cx, ptr.offset.bytes()), - 1, - ) }; - if layout.value != layout::Pointer { - unsafe { llvm::LLVMConstPtrToInt(llval, llty) } - } else { - consts::bitcast(llval, llty) - } - } - } -} - -pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { - let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); - let dl = cx.data_layout(); - let pointer_size = dl.pointer_size.bytes() as usize; - - let mut next_offset = 0; - for &(offset, ((), alloc_id)) in alloc.relocations.iter() { - let offset = offset.bytes(); - assert_eq!(offset as usize as u64, offset); - let offset = offset as usize; - if offset > next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset])); - } - let ptr_offset = read_target_uint( - dl.endian, - &alloc.bytes[offset..(offset + pointer_size)], - ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - llvals.push(scalar_to_llvm( - cx, - Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), - &layout::Scalar { - value: layout::Primitive::Pointer, - valid_range: 0..=!0 - }, - Type::i8p(cx) - )); - next_offset = offset + pointer_size; - } - if alloc.bytes.len() >= next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset ..])); - } - - C_struct(cx, &llvals, true) -} - -pub fn codegen_static_initializer( - cx: &CodegenCx<'ll, 'tcx>, - def_id: DefId, -) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> { - let instance = ty::Instance::mono(cx.tcx, def_id); - let cid = GlobalId { - instance, - promoted: None, - }; - let param_env = ty::ParamEnv::reveal_all(); - let static_ = cx.tcx.const_eval(param_env.and(cid))?; - - let alloc = match static_.val { - ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, - _ => bug!("static const eval returned {:#?}", static_), - }; - Ok((const_alloc_to_llvm(cx, alloc), alloc)) -} - -impl FunctionCx<'a, 'll, 'tcx> { - fn fully_evaluate( - &mut self, - bx: &Builder<'a, 'll, 'tcx>, - constant: &'tcx ty::Const<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { - match constant.val { - ConstValue::Unevaluated(def_id, ref substs) => { - let tcx = bx.tcx(); - let param_env = ty::ParamEnv::reveal_all(); - let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); - let cid = GlobalId { - instance, - promoted: None, - }; - tcx.const_eval(param_env.and(cid)) - }, - _ => Ok(constant), - } - } - - pub fn eval_mir_constant( - &mut self, 
- bx: &Builder<'a, 'll, 'tcx>, - constant: &mir::Constant<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { - let c = self.monomorphize(&constant.literal); - self.fully_evaluate(bx, c) - } - - /// process constant containing SIMD shuffle indices - pub fn simd_shuffle_indices( - &mut self, - bx: &Builder<'a, 'll, 'tcx>, - span: Span, - ty: Ty<'tcx>, - constant: Result<&'tcx ty::Const<'tcx>, ErrorHandled>, - ) -> (&'ll Value, Ty<'tcx>) { - constant - .and_then(|c| { - let field_ty = c.ty.builtin_index().unwrap(); - let fields = match c.ty.sty { - ty::Array(_, n) => n.unwrap_usize(bx.tcx()), - ref other => bug!("invalid simd shuffle type: {}", other), - }; - let values: Result, ErrorHandled> = (0..fields).map(|field| { - let field = const_field( - bx.tcx(), - ty::ParamEnv::reveal_all(), - self.instance, - None, - mir::Field::new(field as usize), - c, - )?; - if let Some(prim) = field.val.try_to_scalar() { - let layout = bx.cx.layout_of(field_ty); - let scalar = match layout.abi { - layout::Abi::Scalar(ref x) => x, - _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) - }; - Ok(scalar_to_llvm( - bx.cx, prim, scalar, - layout.immediate_llvm_type(bx.cx), - )) - } else { - bug!("simd shuffle field {:?}", field) - } - }).collect(); - let llval = C_struct(bx.cx, &values?, false); - Ok((llval, c.ty)) - }) - .unwrap_or_else(|_| { - bx.tcx().sess.span_err( - span, - "could not evaluate shuffle_indices at compile time", - ); - // We've errored, so we don't have to produce working code. - let ty = self.monomorphize(&ty); - let llty = bx.cx.layout_of(ty).llvm_type(bx.cx); - (C_undef(llty), ty) - }) - } -} diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index 91c1ccbe00213dd6e53176c60544aa314df4064e..9b2d17d65caa30fe5c20c8b26c79ea2ea18d72cb 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -8,181 +8,82 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Walks the crate looking for items/impl-items/trait-items that have -//! either a `rustc_symbol_name` or `rustc_item_path` attribute and -//! generates an error giving, respectively, the symbol name or -//! item-path. This is used for unit testing the code that generates -//! paths etc in all kinds of annoying scenarios. 
- -use asm; use attributes; use base; -use consts; use context::CodegenCx; -use declare; use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; -use rustc::hir; -use rustc::hir::def::Def; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty::TypeFoldable; -use rustc::ty::layout::LayoutOf; -use std::fmt; +use rustc::ty::layout::{LayoutOf, HasTyCtxt}; +use rustc_codegen_ssa::traits::*; pub use rustc::mir::mono::MonoItem; -pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; - -pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { - fn define(&self, cx: &CodegenCx<'a, 'tcx>) { - debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); +impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn predefine_static(&self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); + + let g = self.define_global(symbol_name, llty).unwrap_or_else(|| { + self.sess().span_fatal(self.tcx.def_span(def_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); - match *self.as_mono_item() { - MonoItem::Static(def_id) => { - let tcx = cx.tcx; - let is_mutable = match tcx.describe_def(def_id) { - Some(Def::Static(_, is_mutable)) => is_mutable, - Some(other) => { - bug!("Expected Def::Static, found {:?}", other) - } - None => { - bug!("Expected Def::Static for {:?}, found nothing", def_id) - } - }; - consts::codegen_static(&cx, def_id, is_mutable); - } - MonoItem::GlobalAsm(node_id) => { - let item = cx.tcx.hir.expect_item(node_id); - if let hir::ItemKind::GlobalAsm(ref ga) = item.node { - asm::codegen_global_asm(cx, ga); - } else { - span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") - } - } - MonoItem::Fn(instance) => { - base::codegen_instance(&cx, instance); - } + unsafe { + llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); } - debug!("END IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); + self.instances.borrow_mut().insert(instance, g); } - fn predefine(&self, - cx: &CodegenCx<'a, 'tcx>, - linkage: Linkage, - visibility: Visibility) { - debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); - - let symbol_name = self.symbol_name(cx.tcx).as_str(); - - debug!("symbol {}", &symbol_name); - - match *self.as_mono_item() { - MonoItem::Static(def_id) => { - predefine_static(cx, def_id, linkage, visibility, &symbol_name); - } - MonoItem::Fn(instance) => { - predefine_fn(cx, instance, linkage, visibility, &symbol_name); - } - MonoItem::GlobalAsm(..) 
=> {} + fn predefine_fn(&self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); + + let mono_sig = instance.fn_sig(self.tcx()); + let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + let lldecl = self.declare_fn(symbol_name, mono_sig); + unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; + base::set_link_section(lldecl, &attrs); + if linkage == Linkage::LinkOnceODR || + linkage == Linkage::WeakODR { + llvm::SetUniqueComdat(self.llmod, lldecl); } - debug!("END PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); - } - - fn to_raw_string(&self) -> String { - match *self.as_mono_item() { - MonoItem::Fn(instance) => { - format!("Fn({:?}, {})", - instance.def, - instance.substs.as_ptr() as usize) - } - MonoItem::Static(id) => { - format!("Static({:?})", id) + // If we're compiling the compiler-builtins crate, e.g. the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal && linkage != Linkage::Private && + self.tcx.is_compiler_builtins(LOCAL_CRATE) { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); } - MonoItem::GlobalAsm(id) => { - format!("GlobalAsm({:?})", id) + } else { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); } } - } -} -impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} - -fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - def_id: DefId, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); - - let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| { - cx.sess().span_fatal(cx.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", symbol_name)) - }); - - unsafe { - llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); - llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); - } - - cx.instances.borrow_mut().insert(instance, g); -} - -fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - instance: Instance<'tcx>, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - assert!(!instance.substs.needs_infer() && - !instance.substs.has_param_types()); - - let mono_sig = instance.fn_sig(cx.tcx); - let attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); - let lldecl = declare::declare_fn(cx, symbol_name, mono_sig); - unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; - base::set_link_section(lldecl, &attrs); - if linkage == Linkage::LinkOnceODR || - linkage == Linkage::WeakODR { - llvm::SetUniqueComdat(cx.llmod, lldecl); - } - - // If we're compiling the compiler-builtins crate, e.g. the equivalent of - // compiler-rt, then we want to implicitly compile everything with hidden - // visibility as we're going to link this object all over the place but - // don't want the symbols to get exported. 
- if linkage != Linkage::Internal && linkage != Linkage::Private && - cx.tcx.is_compiler_builtins(LOCAL_CRATE) { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + debug!("predefine_fn: mono_sig = {:?} instance = {:?}", mono_sig, instance); + if instance.def.is_inline(self.tcx) { + attributes::inline(self, lldecl, attributes::InlineAttr::Hint); } - } else { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); - } - } + attributes::from_fn_attrs(self, lldecl, Some(instance.def.def_id())); - debug!("predefine_fn: mono_sig = {:?} instance = {:?}", mono_sig, instance); - if instance.def.is_inline(cx.tcx) { - attributes::inline(cx, lldecl, attributes::InlineAttr::Hint); + self.instances.borrow_mut().insert(instance, lldecl); } - attributes::from_fn_attrs(cx, lldecl, Some(instance.def.def_id())); - - cx.instances.borrow_mut().insert(instance, lldecl); } diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 6fb78fe4aa5a4124c590b755153451f3a4fab0c0..5c4ebc35240d45042031f66e9b1cdc47f3e70727 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -13,15 +13,23 @@ pub use llvm::Type; use llvm; -use llvm::{Bool, False, True, TypeKind}; - +use llvm::{Bool, False, True}; use context::CodegenCx; +use rustc_codegen_ssa::traits::*; +use value::Value; -use syntax::ast; -use rustc::ty::layout::{self, Align, Size}; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::Ty; +use rustc::ty::layout::TyLayout; +use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; +use common; +use rustc_codegen_ssa::common::TypeKind; +use type_of::LayoutLlvmExt; +use abi::{LlvmType, FnTypeExt}; use std::fmt; +use std::cell::RefCell; use libc::c_uint; @@ -39,233 +47,182 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { } } -impl Type { - pub fn void(cx: &CodegenCx<'ll, '_>) -> &'ll Type { +impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn type_void(&self) -> &'ll Type { unsafe { - llvm::LLVMVoidTypeInContext(cx.llcx) + llvm::LLVMVoidTypeInContext(self.llcx) } } - pub fn metadata(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_metadata(&self) -> &'ll Type { unsafe { - llvm::LLVMRustMetadataTypeInContext(cx.llcx) + llvm::LLVMRustMetadataTypeInContext(self.llcx) } } - pub fn i1(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i1(&self) -> &'ll Type { unsafe { - llvm::LLVMInt1TypeInContext(cx.llcx) + llvm::LLVMInt1TypeInContext(self.llcx) } } - pub fn i8(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i8(&self) -> &'ll Type { unsafe { - llvm::LLVMInt8TypeInContext(cx.llcx) + llvm::LLVMInt8TypeInContext(self.llcx) } } - pub fn i8_llcx(llcx: &llvm::Context) -> &Type { - unsafe { - llvm::LLVMInt8TypeInContext(llcx) - } - } - pub fn i16(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i16(&self) -> &'ll Type { unsafe { - llvm::LLVMInt16TypeInContext(cx.llcx) - } - } - pub fn i32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - unsafe { - llvm::LLVMInt32TypeInContext(cx.llcx) + llvm::LLVMInt16TypeInContext(self.llcx) } } - pub fn i64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i32(&self) -> &'ll Type { unsafe { - llvm::LLVMInt64TypeInContext(cx.llcx) + llvm::LLVMInt32TypeInContext(self.llcx) } } - pub fn i128(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i64(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, 128) + llvm::LLVMInt64TypeInContext(self.llcx) } } - // Creates an integer type with the given number of 
bits, e.g. i24 - pub fn ix(cx: &CodegenCx<'ll, '_>, num_bits: u64) -> &'ll Type { + fn type_i128(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) + llvm::LLVMIntTypeInContext(self.llcx, 128) } } - // Creates an integer type with the given number of bits, e.g. i24 - pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { + fn type_ix(&self, num_bits: u64) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) } } - pub fn f32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - unsafe { - llvm::LLVMFloatTypeInContext(cx.llcx) - } + fn type_isize(&self) -> &'ll Type { + self.isize_ty } - pub fn f64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_f32(&self) -> &'ll Type { unsafe { - llvm::LLVMDoubleTypeInContext(cx.llcx) - } - } - - pub fn bool(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i8(cx) - } - - pub fn char(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i32(cx) - } - - pub fn i8p(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i8(cx).ptr_to() - } - - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to() - } - - pub fn isize(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - cx.isize_ty - } - - pub fn c_int(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - match &cx.tcx.sess.target.target.target_c_int_width[..] { - "16" => Type::i16(cx), - "32" => Type::i32(cx), - "64" => Type::i64(cx), - width => bug!("Unsupported target_c_int_width: {}", width), - } - } - - pub fn int_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::IntTy) -> &'ll Type { - match t { - ast::IntTy::Isize => cx.isize_ty, - ast::IntTy::I8 => Type::i8(cx), - ast::IntTy::I16 => Type::i16(cx), - ast::IntTy::I32 => Type::i32(cx), - ast::IntTy::I64 => Type::i64(cx), - ast::IntTy::I128 => Type::i128(cx), + llvm::LLVMFloatTypeInContext(self.llcx) } } - pub fn uint_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::UintTy) -> &'ll Type { - match t { - ast::UintTy::Usize => cx.isize_ty, - ast::UintTy::U8 => Type::i8(cx), - ast::UintTy::U16 => Type::i16(cx), - ast::UintTy::U32 => Type::i32(cx), - ast::UintTy::U64 => Type::i64(cx), - ast::UintTy::U128 => Type::i128(cx), + fn type_f64(&self) -> &'ll Type { + unsafe { + llvm::LLVMDoubleTypeInContext(self.llcx) } } - pub fn float_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::FloatTy) -> &'ll Type { - match t { - ast::FloatTy::F32 => Type::f32(cx), - ast::FloatTy::F64 => Type::f64(cx), + fn type_x86_mmx(&self) -> &'ll Type { + unsafe { + llvm::LLVMX86MMXTypeInContext(self.llcx) } } - pub fn func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + fn type_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) } } - pub fn variadic_func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + fn type_variadic_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } - pub fn struct_(cx: &CodegenCx<'ll, '_>, els: &[&'ll Type], packed: bool) -> &'ll Type { + fn type_struct( + &self, + els: &[&'ll Type], + packed: bool + ) -> &'ll Type { unsafe { - llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), + llvm::LLVMStructTypeInContext(self.llcx, els.as_ptr(), els.len() as c_uint, packed as Bool) } } - pub fn named_struct(cx: &CodegenCx<'ll, '_>, name: &str) -> &'ll Type { + fn type_named_struct(&self, name: &str) -> &'ll Type { let name = SmallCStr::new(name); unsafe { - 
llvm::LLVMStructCreateNamed(cx.llcx, name.as_ptr()) + llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) } } - pub fn array(ty: &Type, len: u64) -> &Type { + fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMRustArrayType(ty, len) } } - pub fn vector(ty: &Type, len: u64) -> &Type { + fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } - pub fn kind(&self) -> TypeKind { + fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { - llvm::LLVMRustGetTypeKind(self) + llvm::LLVMRustGetTypeKind(ty).to_generic() } } - pub fn set_struct_body(&'ll self, els: &[&'ll Type], packed: bool) { + fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) { unsafe { - llvm::LLVMStructSetBody(self, els.as_ptr(), + llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) } } - pub fn ptr_to(&self) -> &Type { - assert_ne!(self.kind(), TypeKind::Function, + fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { + assert_ne!(self.type_kind(ty), TypeKind::Function, "don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead"); - unsafe { - llvm::LLVMPointerType(self, 0) - } + ty.ptr_to() } - pub fn element_type(&self) -> &Type { + fn element_type(&self, ty: &'ll Type) -> &'ll Type { unsafe { - llvm::LLVMGetElementType(self) + llvm::LLVMGetElementType(ty) } } - /// Return the number of elements in `self` if it is a LLVM vector type. - pub fn vector_length(&self) -> usize { + fn vector_length(&self, ty: &'ll Type) -> usize { unsafe { - llvm::LLVMGetVectorSize(self) as usize + llvm::LLVMGetVectorSize(ty) as usize } } - pub fn func_params(&self) -> Vec<&Type> { + fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { - let n_args = llvm::LLVMCountParamTypes(self) as usize; + let n_args = llvm::LLVMCountParamTypes(ty) as usize; let mut args = Vec::with_capacity(n_args); - llvm::LLVMGetParamTypes(self, args.as_mut_ptr()); + llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); args.set_len(n_args); args } } - pub fn float_width(&self) -> usize { - match self.kind() { + fn float_width(&self, ty: &'ll Type) -> usize { + match self.type_kind(ty) { TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, @@ -274,45 +231,84 @@ pub fn float_width(&self) -> usize { } } - /// Retrieve the bit width of the integer type `self`. - pub fn int_width(&self) -> u64 { + fn int_width(&self, ty: &'ll Type) -> u64 { unsafe { - llvm::LLVMGetIntTypeWidth(self) as u64 + llvm::LLVMGetIntTypeWidth(ty) as u64 } } - pub fn from_integer(cx: &CodegenCx<'ll, '_>, i: layout::Integer) -> &'ll Type { - use rustc::ty::layout::Integer::*; - match i { - I8 => Type::i8(cx), - I16 => Type::i16(cx), - I32 => Type::i32(cx), - I64 => Type::i64(cx), - I128 => Type::i128(cx), + fn val_ty(&self, v: &'ll Value) -> &'ll Type { + common::val_ty(v) + } + + fn scalar_lltypes(&self) -> &RefCell, Self::Type>> { + &self.scalar_lltypes + } +} + +impl Type { + pub fn i8_llcx(llcx: &llvm::Context) -> &Type { + unsafe { + llvm::LLVMInt8TypeInContext(llcx) } } - /// Return a LLVM type that has at most the required alignment, - /// as a conservative approximation for unknown pointee types. - pub fn pointee_for_abi_align(cx: &CodegenCx<'ll, '_>, align: Align) -> &'ll Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. - let ity = layout::Integer::approximate_abi_align(cx, align); - Type::from_integer(cx, ity) + // Creates an integer type with the given number of bits, e.g. 
i24 + pub fn ix_llcx( + llcx: &llvm::Context, + num_bits: u64 + ) -> &Type { + unsafe { + llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + } } - /// Return a LLVM type that has at most the required alignment, - /// and exactly the required size, as a best-effort padding array. - pub fn padding_filler(cx: &CodegenCx<'ll, '_>, size: Size, align: Align) -> &'ll Type { - let unit = layout::Integer::approximate_abi_align(cx, align); - let size = size.bytes(); - let unit_size = unit.size().bytes(); - assert_eq!(size % unit_size, 0); - Type::array(Type::from_integer(cx, unit), size / unit_size) + pub fn i8p_llcx(llcx: &'ll llvm::Context) -> &'ll Type { + Type::i8_llcx(llcx).ptr_to() } - pub fn x86_mmx(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn ptr_to(&self) -> &Type { unsafe { - llvm::LLVMX86MMXTypeInContext(cx.llcx) + llvm::LLVMPointerType(&self, 0) } } } + + +impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { + fn backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type { + layout.llvm_type(self) + } + fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type { + layout.immediate_llvm_type(self) + } + fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool { + layout.is_llvm_immediate() + } + fn is_backend_scalar_pair(&self, layout: TyLayout<'tcx>) -> bool { + layout.is_llvm_scalar_pair() + } + fn backend_field_index(&self, layout: TyLayout<'tcx>, index: usize) -> u64 { + layout.llvm_field_index(index) + } + fn scalar_pair_element_backend_type<'a>( + &self, + layout: TyLayout<'tcx>, + index: usize, + immediate: bool + ) -> &'ll Type { + layout.scalar_pair_element_llvm_type(self, index, immediate) + } + fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { + ty.llvm_type(self) + } + fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.llvm_type(self) + } + fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.ptr_to_llvm_type(self) + } + fn reg_backend_type(&self, ty: &Reg) -> &'ll Type { + ty.llvm_type(self) + } +} diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index fea02edf7be01b70748d68f73f53285515b6e5d2..90c02cddb2b6058c42f0d6dd2d0d3a7f4ec3dec0 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -16,6 +16,7 @@ use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; +use rustc_codegen_ssa::traits::*; use std::fmt::Write; @@ -37,14 +38,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, (cx.sess().target.target.arch == "x86" || cx.sess().target.target.arch == "x86_64"); if use_x86_mmx { - return Type::x86_mmx(cx) + return cx.type_x86_mmx() } else { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); - return Type::vector(element, count); + return cx.type_vector(element, count); } } layout::Abi::ScalarPair(..) 
=> { - return Type::struct_(cx, &[ + return cx.type_struct( &[ layout.scalar_pair_element_llvm_type(cx, 0, false), layout.scalar_pair_element_llvm_type(cx, 1, false), ], false); @@ -79,30 +80,30 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, match layout.fields { layout::FieldPlacement::Union(_) => { - let fill = Type::padding_filler(cx, layout.size, layout.align); + let fill = cx.type_padding_filler( layout.size, layout.align); let packed = false; match name { None => { - Type::struct_(cx, &[fill], packed) + cx.type_struct( &[fill], packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], packed); + let llty = cx.type_named_struct( name); + cx.set_struct_body(llty, &[fill], packed); llty } } } layout::FieldPlacement::Array { count, .. } => { - Type::array(layout.field(cx, 0).llvm_type(cx), count) + cx.type_array(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. } => { match name { None => { let (llfields, packed) = struct_llfields(cx, layout); - Type::struct_(cx, &llfields, packed) + cx.type_struct( &llfields, packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); + let llty = cx.type_named_struct( name); *defer = Some((llty, layout)); llty } @@ -136,7 +137,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, let padding = target_offset - offset; let padding_align = prev_effective_align.min(effective_field_align); assert_eq!(offset.abi_align(padding_align) + padding, target_offset); - result.push(Type::padding_filler(cx, padding, padding_align)); + result.push(cx.type_padding_filler( padding, padding_align)); debug!(" padding before: {:?}", padding); result.push(field.llvm_type(cx)); @@ -153,7 +154,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, assert_eq!(offset.abi_align(padding_align) + padding, layout.size); debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, layout.size); - result.push(Type::padding_filler(cx, padding, padding_align)); + result.push(cx.type_padding_filler(padding, padding_align)); assert_eq!(result.len(), 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", @@ -255,17 +256,17 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { let llty = match self.ty.sty { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. 
}) => { - cx.layout_of(ty).llvm_type(cx).ptr_to() + cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to() + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) } ty::FnPtr(sig) => { let sig = cx.tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - FnType::new(cx, sig, &[]).ptr_to_llvm_type(cx) + cx.fn_ptr_backend_type(&FnType::new(cx, sig, &[])) } _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO) }; @@ -307,7 +308,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { if let Some((llty, layout)) = defer { let (llfields, packed) = struct_llfields(cx, layout); - llty.set_struct_body(&llfields, packed) + cx.set_struct_body(llty, &llfields, packed) } llty @@ -316,7 +317,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { if scalar.is_bool() { - return Type::i1(cx); + return cx.type_i1(); } } self.llvm_type(cx) @@ -325,17 +326,17 @@ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: &layout::Scalar, offset: Size) -> &'a Type { match scalar.value { - layout::Int(i, _) => Type::from_integer(cx, i), - layout::Float(FloatTy::F32) => Type::f32(cx), - layout::Float(FloatTy::F64) => Type::f64(cx), + layout::Int(i, _) => cx.type_from_integer( i), + layout::Float(FloatTy::F32) => cx.type_f32(), + layout::Float(FloatTy::F64) => cx.type_f64(), layout::Pointer => { // If we know the alignment, pick something better than i8. let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - Type::pointee_for_abi_align(cx, pointee.align) + cx.type_pointee_for_abi_align( pointee.align) } else { - Type::i8(cx) + cx.type_i8() }; - pointee.ptr_to() + cx.type_ptr_to(pointee) } } } @@ -369,7 +370,7 @@ fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>, // when immediate. We need to load/store `bool` as `i8` to avoid // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. if immediate && scalar.is_bool() { - return Type::i1(cx); + return cx.type_i1(); } let offset = if index == 0 { diff --git a/src/librustc_codegen_ssa/Cargo.toml b/src/librustc_codegen_ssa/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a158c34f9d1c2a1257bbfd6bded4221133ffa36a --- /dev/null +++ b/src/librustc_codegen_ssa/Cargo.toml @@ -0,0 +1,15 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_codegen_ssa" +version = "0.0.0" + +[lib] +name = "rustc_codegen_ssa" +path = "lib.rs" +test = false + +[dependencies] +cc = "1.0.1" +num_cpus = "1.0" +rustc-demangle = "0.1.4" +memmap = "0.6" diff --git a/src/librustc_codegen_ssa/README.md b/src/librustc_codegen_ssa/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9e1d429180367c199e435743644faeaa32cbc9fa --- /dev/null +++ b/src/librustc_codegen_ssa/README.md @@ -0,0 +1,121 @@ +# Refactoring of `rustc_codegen_llvm` +by Denis Merigoux, October 23rd 2018 + +## State of the code before the refactoring + +All the code related to the compilation of MIR into LLVM IR was contained inside the `rustc_codegen_llvm` crate. 
Here is the breakdown of the most important elements: +* the `back` folder (7,800 LOC) implements the mechanisms for creating the different object files and archives through LLVM, but also the communication mechanisms for parallel code generation; +* the `debuginfo` folder (3,200 LOC) contains all code that passes debug information down to LLVM; +* the `llvm` folder (2,200 LOC) defines the FFI necessary to communicate with LLVM using the C++ API; +* the `mir` folder (4,300 LOC) implements the actual lowering from MIR to LLVM IR; +* the `base.rs` file (1,300 LOC) contains some helper functions but also the high-level code that launches the code generation and distributes the work; +* the `builder.rs` file (1,200 LOC) contains all the functions generating individual LLVM IR instructions inside a basic block; +* the `common.rs` file (450 LOC) contains various helper functions and all the functions generating LLVM static values; +* the `type_.rs` file (300 LOC) defines most of the type translations to LLVM IR. + +The goal of this refactoring is to separate, inside this crate, code that is specific to LLVM from code that can be reused for other rustc backends. For instance, the `mir` folder is almost entirely backend-specific but it relies heavily on other parts of the crate. The separation of the code must not affect its logic nor its performance. + +For these reasons, the separation process involves two transformations that have to be done at the same time for the resulting code to compile: + +1. replace all the LLVM-specific types with generics inside function signatures and structure definitions; +2. encapsulate all functions calling the LLVM FFI inside a set of traits that will define the interface between backend-agnostic code and the backend. + +While the LLVM-specific code will be left in `rustc_codegen_llvm`, all the new traits and backend-agnostic code will be moved into `rustc_codegen_ssa` (name suggestion by @eddyb). + +## Generic types and structures + +@irinagpopa started to parametrize the types of `rustc_codegen_llvm` by a generic `Value` type, implemented in LLVM by a reference `&'ll Value`. This work has been extended to all structures inside the `mir` folder and elsewhere, as well as to LLVM's `BasicBlock` and `Type` types. + +The two most important structures for the LLVM codegen are `CodegenCx` and `Builder`. They are parametrized by multiple lifetime parameters and the type for `Value`. + +```rust +struct CodegenCx<'ll, 'tcx: 'll> { + /* ... */ +} + +struct Builder<'a, 'll: 'a, 'tcx: 'll> { + cx: &'a CodegenCx<'ll, 'tcx>, + /* ... */ +} +``` + +`CodegenCx` is used to compile one codegen unit that can contain multiple functions, whereas `Builder` is created to compile one basic block. + +The code in `rustc_codegen_llvm` has to deal with multiple explicit lifetime parameters that correspond to the following: +* `'tcx` is the longest lifetime, corresponding to the original `TyCtxt` containing the program's information; +* `'a` is a short-lived reference to a `CodegenCx` or another object inside a struct; +* `'ll` is the lifetime of references to LLVM objects such as `Value` or `Type`. + +Although there are already many lifetime parameters in the code, making it generic uncovered situations where the borrow-checker was passing only due to the special nature of the LLVM objects manipulated (they are extern pointers). For instance, an additional lifetime parameter had to be added to `LocalAnalyzer` in `analyze.rs`, leading to the definition: + +```rust +struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a> { + /* ... */ +} +```
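Most of the data structures in the `mir` folder went through the same parametrization over the backend's value type. As a rough sketch (the fields and comments below are simplified, not quoted from the source), `PlaceRef` ends up generic over a value type `V` instead of hard-coding LLVM's `&'ll Value`; the LLVM backend then simply instantiates it as `PlaceRef<'tcx, &'ll Value>`:

```rust
// Sketch of a backend-agnostic `mir` structure after the split: everything that
// used to be an LLVM value reference is now the type parameter `V`.
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,
    /// The extra data (length or vtable pointer) if the place is unsized.
    pub llextra: Option<V>,
    /// The monomorphized layout of the place's type.
    pub layout: TyLayout<'tcx>,
    /// The known alignment of the place.
    pub align: Align,
}
```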
However, the two most important structures `CodegenCx` and `Builder` are not defined in the backend-agnostic code. Indeed, their content is highly specific to the backend, and it makes more sense to leave their definition to the backend implementor than to expose them only through a narrow generic field for the backend's context. + +## Traits and interface + +Because they have to be defined by the backend, `CodegenCx` and `Builder` will be the structures implementing all the traits defining the backend's interface. These traits are defined in the folder `rustc_codegen_ssa/traits` and all the backend-agnostic code is parametrized by them. For instance, let us explain how a function in `base.rs` is parametrized: + +```rust +pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + instance: Instance<'tcx> +) { + /* ... */ +} +``` + +In this signature, we have the two lifetime parameters explained earlier and the master type `Bx`, which satisfies the trait `BuilderMethods` corresponding to the interface satisfied by the `Builder` struct. The `BuilderMethods` trait defines an associated type `Bx::CodegenCx` that itself satisfies the `CodegenMethods` trait implemented by the struct `CodegenCx`. + +On the trait side, here is an example with part of the definition of `BuilderMethods` in `traits/builder.rs`: + +```rust +pub trait BuilderMethods<'a, 'tcx: 'a>: + HasCodegen<'tcx> + + DebugInfoBuilderMethods<'tcx> + + ArgTypeMethods<'tcx> + + AbiBuilderMethods<'tcx> + + IntrinsicCallMethods<'tcx> + + AsmBuilderMethods<'tcx> +{ + fn new_block<'b>( + cx: &'a Self::CodegenCx, + llfn: Self::Value, + name: &'b str + ) -> Self; + /* ... */ + fn cond_br( + &mut self, + cond: Self::Value, + then_llbb: Self::BasicBlock, + else_llbb: Self::BasicBlock, + ); + /* ... */ +} +``` + +Finally, a master structure implementing the `ExtraBackendMethods` trait is used for high-level codegen-driving functions like `codegen_crate` in `base.rs`. For LLVM, it is the empty `LlvmCodegenBackend`. `ExtraBackendMethods` should be implemented by the same structure that implements the `CodegenBackend` defined in `rustc_codegen_utils/codegen_backend.rs`. + +During the traitification process, certain functions have been converted from methods of a local structure to methods of `CodegenCx` or `Builder`, and a corresponding `self` parameter has been added. Indeed, LLVM stores information internally that it can access when called through its API. This information does not show up in a Rust data structure carried around when these methods are called. However, when implementing a Rust backend for `rustc`, these methods will need information from `CodegenCx`, hence the additional parameter (unused in the LLVM implementation of the trait).
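To make this conversion concrete, here is a minimal before/after sketch for one of the simplest operations, creating the LLVM `i8` type (condensed from the `type_.rs` changes in this patch; the other type constructors follow the same pattern):

```rust
// Before: an inherent constructor on `Type`, taking the LLVM context explicitly.
impl Type {
    pub fn i8(cx: &CodegenCx<'ll, '_>) -> &'ll Type {
        unsafe { llvm::LLVMInt8TypeInContext(cx.llcx) }
    }
}

// After: the same operation as a method of `CodegenCx`, exposed through the
// backend-agnostic `BaseTypeMethods` trait from `rustc_codegen_ssa`.
impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn type_i8(&self) -> &'ll Type {
        unsafe { llvm::LLVMInt8TypeInContext(self.llcx) }
    }
}
```

Backend-agnostic code now calls `cx.type_i8()` through the trait and never touches the LLVM FFI directly.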
## State of the code after the refactoring + +The traits offer an API which is very similar to the API of LLVM. This is not the best solution since LLVM has a very special way of doing things: when adding another backend, the trait definitions might have to be changed in order to offer more flexibility. + +However, the current separation between backend-agnostic and LLVM-specific code has allowed the reuse of a significant part of the old `rustc_codegen_llvm`. Here is the new LOC breakdown between backend-agnostic (BA) and LLVM for the most important elements: + +* `back` folder: 3,800 (BA) vs 4,100 (LLVM); +* `mir` folder: 4,400 (BA) vs 0 (LLVM); +* `base.rs`: 1,100 (BA) vs 250 (LLVM); +* `builder.rs`: 1,400 (BA) vs 0 (LLVM); +* `common.rs`: 350 (BA) vs 350 (LLVM); + +The `debuginfo` folder has been left almost untouched by the splitting and is specific to LLVM. Only its high-level features have been traitified. + +The new `traits` folder contains 1,500 LOC of trait definitions alone. Overall, the old 27,000 LOC `rustc_codegen_llvm` has been split into the new 18,500 LOC `rustc_codegen_llvm` and the 12,000 LOC `rustc_codegen_ssa`. We can say that this refactoring allowed the reuse of approximately 10,000 LOC that would otherwise have had to be duplicated between the multiple backends of `rustc`. + +The refactored version of `rustc`'s backend introduced no regressions in the test suite nor in the performance benchmarks, which is consistent with the nature of the refactoring: it only used compile-time parametricity (no trait objects). diff --git a/src/librustc_codegen_ssa/back/archive.rs b/src/librustc_codegen_ssa/back/archive.rs new file mode 100644 index 0000000000000000000000000000000000000000..b5e1deb0d5df3f4837527340eee8d3cc85c17df0 --- /dev/null +++ b/src/librustc_codegen_ssa/back/archive.rs @@ -0,0 +1,36 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::session::Session; + +use std::path::PathBuf; + +pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session) + -> PathBuf { + // On Windows, static libraries sometimes show up as libfoo.a and other + // times show up as foo.lib + let oslibname = format!("{}{}{}", + sess.target.target.options.staticlib_prefix, + name, + sess.target.target.options.staticlib_suffix); + let unixlibname = format!("lib{}.a", name); + + for path in search_paths { + debug!("looking for {} inside {:?}", name, path); + let test = path.join(&oslibname); + if test.exists() { return test } + if oslibname != unixlibname { + let test = path.join(&unixlibname); + if test.exists() { return test } + } + } + sess.fatal(&format!("could not find native static library `{}`, \ perhaps an -L flag is missing?", name)); +} diff --git a/src/librustc_codegen_utils/command.rs b/src/librustc_codegen_ssa/back/command.rs similarity index 100% rename from src/librustc_codegen_utils/command.rs rename to src/librustc_codegen_ssa/back/command.rs diff --git a/src/librustc_codegen_ssa/back/link.rs b/src/librustc_codegen_ssa/back/link.rs new file mode 100644 index 0000000000000000000000000000000000000000..b0575b841d5d5daa1dcfa1e8c3e2331af208b9f5 --- /dev/null +++ b/src/librustc_codegen_ssa/back/link.rs @@ -0,0 +1,208 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms.
+ +/// For all the linkers we support, and information they might +/// need out of the shared crate context before we get rid of it. + +use rustc::session::{Session, config}; +use rustc::session::search_paths::PathKind; +use rustc::middle::dependency_format::Linkage; +use rustc::middle::cstore::LibSource; +use rustc_target::spec::LinkerFlavor; +use rustc::hir::def_id::CrateNum; + +use super::command::Command; +use CrateInfo; + +use cc::windows_registry; +use std::fs; +use std::path::{Path, PathBuf}; +use std::env; + +pub fn remove(sess: &Session, path: &Path) { + if let Err(e) = fs::remove_file(path) { + sess.err(&format!("failed to remove {}: {}", + path.display(), + e)); + } +} + +// The third parameter is for env vars, used on windows to set up the +// path for MSVC to find its DLLs, and gcc to find its bundled +// toolchain +pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { + let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); + + // If our linker looks like a batch script on Windows then to execute this + // we'll need to spawn `cmd` explicitly. This is primarily done to handle + // emscripten where the linker is `emcc.bat` and needs to be spawned as + // `cmd /c emcc.bat ...`. + // + // This worked historically but is needed manually since #42436 (regression + // was tagged as #42791) and some more info can be found on #44443 for + // emscripten itself. + let mut cmd = match linker.to_str() { + Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), + _ => match flavor { + LinkerFlavor::Lld(f) => Command::lld(linker, f), + LinkerFlavor::Msvc + if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => + { + Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) + }, + _ => Command::new(linker), + } + }; + + // The compiler's sysroot often has some bundled tools, so add it to the + // PATH for the child. 
+ let mut new_path = sess.host_filesearch(PathKind::All) + .get_tools_search_paths(); + let mut msvc_changed_path = false; + if sess.target.target.options.is_like_msvc { + if let Some(ref tool) = msvc_tool { + cmd.args(tool.args()); + for &(ref k, ref v) in tool.env() { + if k == "PATH" { + new_path.extend(env::split_paths(v)); + msvc_changed_path = true; + } else { + cmd.env(k, v); + } + } + } + } + + if !msvc_changed_path { + if let Some(path) = env::var_os("PATH") { + new_path.extend(env::split_paths(&path)); + } + } + cmd.env("PATH", env::join_paths(new_path).unwrap()); + + (linker.to_path_buf(), cmd) +} + +pub fn each_linked_rlib(sess: &Session, + info: &CrateInfo, + f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { + let crates = info.used_crates_static.iter(); + let fmts = sess.dependency_formats.borrow(); + let fmts = fmts.get(&config::CrateType::Executable) + .or_else(|| fmts.get(&config::CrateType::Staticlib)) + .or_else(|| fmts.get(&config::CrateType::Cdylib)) + .or_else(|| fmts.get(&config::CrateType::ProcMacro)); + let fmts = match fmts { + Some(f) => f, + None => return Err("could not find formats for rlibs".to_string()) + }; + for &(cnum, ref path) in crates { + match fmts.get(cnum.as_usize() - 1) { + Some(&Linkage::NotLinked) | + Some(&Linkage::IncludedFromDylib) => continue, + Some(_) => {} + None => return Err("could not find formats for rlibs".to_string()) + } + let name = &info.crate_name[&cnum]; + let path = match *path { + LibSource::Some(ref p) => p, + LibSource::MetadataOnly => { + return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", + name)) + } + LibSource::None => { + return Err(format!("could not find rlib for: `{}`", name)) + } + }; + f(cnum, &path); + } + Ok(()) +} + +/// Returns a boolean indicating whether the specified crate should be ignored +/// during LTO. +/// +/// Crates ignored during LTO are not lumped together in the "massive object +/// file" that we create and are linked in their normal rlib states. See +/// comments below for what crates do not participate in LTO. +/// +/// It's unusual for a crate to not participate in LTO. Typically only +/// compiler-specific and unstable crates have a reason to not participate in +/// LTO. +pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { + // If our target enables builtin function lowering in LLVM then the + // crates providing these functions don't participate in LTO (e.g. + // no_builtins or compiler builtins crates). 
+ !sess.target.target.options.no_builtins && + (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum)) +} + +pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { + fn infer_from( + sess: &Session, + linker: Option, + flavor: Option, + ) -> Option<(PathBuf, LinkerFlavor)> { + match (linker, flavor) { + (Some(linker), Some(flavor)) => Some((linker, flavor)), + // only the linker flavor is known; use the default linker for the selected flavor + (None, Some(flavor)) => Some((PathBuf::from(match flavor { + LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, + LinkerFlavor::Gcc => "cc", + LinkerFlavor::Ld => "ld", + LinkerFlavor::Msvc => "link.exe", + LinkerFlavor::Lld(_) => "lld", + }), flavor)), + (Some(linker), None) => { + let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { + sess.fatal("couldn't extract file stem from specified linker"); + }).to_owned(); + + let flavor = if stem == "emcc" { + LinkerFlavor::Em + } else if stem == "gcc" || stem.ends_with("-gcc") { + LinkerFlavor::Gcc + } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { + LinkerFlavor::Ld + } else if stem == "link" || stem == "lld-link" { + LinkerFlavor::Msvc + } else if stem == "lld" || stem == "rust-lld" { + LinkerFlavor::Lld(sess.target.target.options.lld_flavor) + } else { + // fall back to the value in the target spec + sess.target.target.linker_flavor + }; + + Some((linker, flavor)) + }, + (None, None) => None, + } + } + + // linker and linker flavor specified via command line have precedence over what the target + // specification specifies + if let Some(ret) = infer_from( + sess, + sess.opts.cg.linker.clone(), + sess.opts.debugging_opts.linker_flavor, + ) { + return ret; + } + + if let Some(ret) = infer_from( + sess, + sess.target.target.options.linker.clone().map(PathBuf::from), + Some(sess.target.target.linker_flavor), + ) { + return ret; + } + + bug!("Not enough information provided to determine how to invoke the linker"); +} diff --git a/src/librustc_codegen_utils/linker.rs b/src/librustc_codegen_ssa/back/linker.rs similarity index 99% rename from src/librustc_codegen_utils/linker.rs rename to src/librustc_codegen_ssa/back/linker.rs index 219bf2566383e2e98c870ee87c9f7d86d37e7a5b..da9cfbb94d1c5425ba2df5239fc51a1723e2337e 100644 --- a/src/librustc_codegen_utils/linker.rs +++ b/src/librustc_codegen_ssa/back/linker.rs @@ -8,6 +8,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use super::symbol_export; +use super::command::Command; +use super::archive; + use rustc_data_structures::fx::FxHashMap; use std::ffi::{OsStr, OsString}; use std::fs::{self, File}; @@ -15,7 +19,6 @@ use std::io::{self, BufWriter}; use std::path::{Path, PathBuf}; -use command::Command; use rustc::hir::def_id::{LOCAL_CRATE, CrateNum}; use rustc::middle::dependency_format::Linkage; use rustc::session::Session; @@ -256,7 +259,7 @@ fn link_whole_staticlib(&mut self, lib: &str, search_path: &[PathBuf]) { // -force_load is the macOS equivalent of --whole-archive, but it // involves passing the full path to the library to link. 
self.linker_arg("-force_load"); - let lib = ::find_library(lib, search_path, &self.sess); + let lib = archive::find_library(lib, search_path, &self.sess); self.linker_arg(&lib); } } @@ -878,36 +881,6 @@ fn cross_lang_lto(&mut self) { } } -fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { - let mut symbols = Vec::new(); - - let export_threshold = - ::symbol_export::crates_export_threshold(&[crate_type]); - for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - - let formats = tcx.sess.dependency_formats.borrow(); - let deps = formats[&crate_type].iter(); - - for (index, dep_format) in deps.enumerate() { - let cnum = CrateNum::new(index + 1); - // For each dependency that we are linking to statically ... - if *dep_format == Linkage::Static { - // ... we add its symbol list to our export list. - for &(symbol, level) in tcx.exported_symbols(cnum).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - } - } - - symbols -} - pub struct WasmLd<'a> { cmd: Command, sess: &'a Session, @@ -1075,3 +1048,32 @@ fn cross_lang_lto(&mut self) { // Do nothing for now } } + +fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { + let mut symbols = Vec::new(); + + let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); + for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + + let formats = tcx.sess.dependency_formats.borrow(); + let deps = formats[&crate_type].iter(); + + for (index, dep_format) in deps.enumerate() { + let cnum = CrateNum::new(index + 1); + // For each dependency that we are linking to statically ... + if *dep_format == Linkage::Static { + // ... we add its symbol list to our export list. + for &(symbol, level) in tcx.exported_symbols(cnum).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + } + } + + symbols +} diff --git a/src/librustc_codegen_ssa/back/lto.rs b/src/librustc_codegen_ssa/back/lto.rs new file mode 100644 index 0000000000000000000000000000000000000000..8d03edca004f6fa95bd0a22c9eb822a96ce69914 --- /dev/null +++ b/src/librustc_codegen_ssa/back/lto.rs @@ -0,0 +1,122 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::write::CodegenContext; +use traits::*; +use ModuleCodegen; + +use rustc::util::time_graph::Timeline; +use rustc_errors::FatalError; + +use std::sync::Arc; +use std::ffi::CString; + +pub struct ThinModule { + pub shared: Arc>, + pub idx: usize, +} + +impl ThinModule { + pub fn name(&self) -> &str { + self.shared.module_names[self.idx].to_str().unwrap() + } + + pub fn cost(&self) -> u64 { + // Yes, that's correct, we're using the size of the bytecode as an + // indicator for how costly this codegen unit is. 
+ self.data().len() as u64 + } + + pub fn data(&self) -> &[u8] { + let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); + a.unwrap_or_else(|| { + let len = self.shared.thin_buffers.len(); + self.shared.serialized_modules[self.idx - len].data() + }) + } +} + +pub struct ThinShared { + pub data: B::ThinData, + pub thin_buffers: Vec, + pub serialized_modules: Vec>, + pub module_names: Vec, +} + + +pub enum LtoModuleCodegen { + Fat { + module: Option>, + _serialized_bitcode: Vec>, + }, + + Thin(ThinModule), +} + +impl LtoModuleCodegen { + pub fn name(&self) -> &str { + match *self { + LtoModuleCodegen::Fat { .. } => "everything", + LtoModuleCodegen::Thin(ref m) => m.name(), + } + } + + /// Optimize this module within the given codegen context. + /// + /// This function is unsafe as it'll return a `ModuleCodegen` still + /// points to LLVM data structures owned by this `LtoModuleCodegen`. + /// It's intended that the module returned is immediately code generated and + /// dropped, and then this LTO module is dropped. + pub unsafe fn optimize( + &mut self, + cgcx: &CodegenContext, + timeline: &mut Timeline + ) -> Result, FatalError> { + match *self { + LtoModuleCodegen::Fat { ref mut module, .. } => { + let module = module.take().unwrap(); + { + let config = cgcx.config(module.kind); + B::run_lto_pass_manager(cgcx, &module, config, false); + timeline.record("fat-done"); + } + Ok(module) + } + LtoModuleCodegen::Thin(ref mut thin) => B::optimize_thin(cgcx, thin, timeline), + } + } + + /// A "gauge" of how costly it is to optimize this module, used to sort + /// biggest modules first. + pub fn cost(&self) -> u64 { + match *self { + // Only one module with fat LTO, so the cost doesn't matter. + LtoModuleCodegen::Fat { .. } => 0, + LtoModuleCodegen::Thin(ref m) => m.cost(), + } + } +} + + +pub enum SerializedModule { + Local(M), + FromRlib(Vec), + FromUncompressedFile(memmap::Mmap), +} + +impl SerializedModule { + pub fn data(&self) -> &[u8] { + match *self { + SerializedModule::Local(ref m) => m.data(), + SerializedModule::FromRlib(ref m) => m, + SerializedModule::FromUncompressedFile(ref m) => m, + } + } +} diff --git a/src/librustc_codegen_ssa/back/mod.rs b/src/librustc_codegen_ssa/back/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..3d7ead74d1c5dc69d38a5d94919c0d1a814698cd --- /dev/null +++ b/src/librustc_codegen_ssa/back/mod.rs @@ -0,0 +1,17 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub mod write; +pub mod linker; +pub mod lto; +pub mod link; +pub mod command; +pub mod symbol_export; +pub mod archive; diff --git a/src/librustc_codegen_utils/symbol_export.rs b/src/librustc_codegen_ssa/back/symbol_export.rs similarity index 100% rename from src/librustc_codegen_utils/symbol_export.rs rename to src/librustc_codegen_ssa/back/symbol_export.rs diff --git a/src/librustc_codegen_ssa/back/write.rs b/src/librustc_codegen_ssa/back/write.rs new file mode 100644 index 0000000000000000000000000000000000000000..46aee5339ba9e4396379ebf5bf58acce3566cfef --- /dev/null +++ b/src/librustc_codegen_ssa/back/write.rs @@ -0,0 +1,1853 @@ +// Copyright 2013-2015 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use {ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo, CodegenResults, + RLIB_BYTECODE_EXTENSION}; +use super::linker::LinkerInfo; +use super::lto::{self, SerializedModule}; +use super::link::{self, remove, get_linker}; +use super::command::Command; +use super::symbol_export::ExportedSymbols; + +use memmap; +use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, + in_incr_comp_dir, in_incr_comp_dir_sess}; +use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; +use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; +use rustc::middle::cstore::EncodedMetadata; +use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::Session; +use rustc::util::nodemap::FxHashMap; +use rustc::util::time_graph::{self, TimeGraph, Timeline}; +use traits::*; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::ty::TyCtxt; +use rustc::util::common::{time_depth, set_time_depth, print_time_passes_entry}; +use rustc_fs_util::link_or_copy; +use rustc_data_structures::svh::Svh; +use rustc_errors::{Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; +use rustc_errors::emitter::{Emitter}; +use syntax::attr; +use syntax::ext::hygiene::Mark; +use syntax_pos::MultiSpan; +use syntax_pos::symbol::Symbol; +use jobserver::{Client, Acquired}; + +use std::any::Any; +use std::fs; +use std::io; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str; +use std::sync::Arc; +use std::sync::mpsc::{channel, Sender, Receiver}; +use std::time::Instant; +use std::thread; + +const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; + +/// Module-specific configuration for `optimize_and_codegen`. +pub struct ModuleConfig { + /// Names of additional optimization passes to run. + pub passes: Vec, + /// Some(level) to optimize at a certain level, or None to run + /// absolutely no optimizations (used for the metadata module). + pub opt_level: Option, + + /// Some(level) to optimize binary size, or None to not affect program size. + pub opt_size: Option, + + pub pgo_gen: Option, + pub pgo_use: String, + + // Flags indicating which outputs to produce. + pub emit_pre_thin_lto_bc: bool, + pub emit_no_opt_bc: bool, + pub emit_bc: bool, + pub emit_bc_compressed: bool, + pub emit_lto_bc: bool, + pub emit_ir: bool, + pub emit_asm: bool, + pub emit_obj: bool, + // Miscellaneous flags. These are mostly copied from command-line + // options. + pub verify_llvm_ir: bool, + pub no_prepopulate_passes: bool, + pub no_builtins: bool, + pub time_passes: bool, + pub vectorize_loop: bool, + pub vectorize_slp: bool, + pub merge_functions: bool, + pub inline_threshold: Option, + // Instead of creating an object file by doing LLVM codegen, just + // make the object file bitcode. Provides easy compatibility with + // emscripten's ecc compiler, when used as the linker. 
+ pub obj_is_bitcode: bool, + pub no_integrated_as: bool, + pub embed_bitcode: bool, + pub embed_bitcode_marker: bool, +} + +impl ModuleConfig { + fn new(passes: Vec) -> ModuleConfig { + ModuleConfig { + passes, + opt_level: None, + opt_size: None, + + pgo_gen: None, + pgo_use: String::new(), + + emit_no_opt_bc: false, + emit_pre_thin_lto_bc: false, + emit_bc: false, + emit_bc_compressed: false, + emit_lto_bc: false, + emit_ir: false, + emit_asm: false, + emit_obj: false, + obj_is_bitcode: false, + embed_bitcode: false, + embed_bitcode_marker: false, + no_integrated_as: false, + + verify_llvm_ir: false, + no_prepopulate_passes: false, + no_builtins: false, + time_passes: false, + vectorize_loop: false, + vectorize_slp: false, + merge_functions: false, + inline_threshold: None + } + } + + fn set_flags(&mut self, sess: &Session, no_builtins: bool) { + self.verify_llvm_ir = sess.verify_llvm_ir(); + self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; + self.no_builtins = no_builtins || sess.target.target.options.no_builtins; + self.time_passes = sess.time_passes(); + self.inline_threshold = sess.opts.cg.inline_threshold; + self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || + sess.opts.debugging_opts.cross_lang_lto.enabled(); + let embed_bitcode = sess.target.target.options.embed_bitcode || + sess.opts.debugging_opts.embed_bitcode; + if embed_bitcode { + match sess.opts.optimize { + config::OptLevel::No | + config::OptLevel::Less => { + self.embed_bitcode_marker = embed_bitcode; + } + _ => self.embed_bitcode = embed_bitcode, + } + } + + // Copy what clang does by turning on loop vectorization at O2 and + // slp vectorization at O3. Otherwise configure other optimization aspects + // of this pass manager builder. + // Turn off vectorization for emscripten, as it's not very well supported. + self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && + (sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive) && + !sess.target.target.options.is_like_emscripten; + + self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && + sess.opts.optimize == config::OptLevel::Aggressive && + !sess.target.target.options.is_like_emscripten; + + self.merge_functions = sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive; + } + + pub fn bitcode_needed(&self) -> bool { + self.emit_bc || self.obj_is_bitcode + || self.emit_bc_compressed || self.embed_bitcode + } +} + +/// Assembler name and command used by codegen when no_integrated_as is enabled +pub struct AssemblerCommand { + name: PathBuf, + cmd: Command, +} + +// HACK(eddyb) work around `#[derive]` producing wrong bounds for `Clone`. 
+pub struct TargetMachineFactory( + pub Arc Result + Send + Sync>, +); + +impl Clone for TargetMachineFactory { + fn clone(&self) -> Self { + TargetMachineFactory(self.0.clone()) + } +} + +/// Additional resources used by optimize_and_codegen (not module specific) +#[derive(Clone)] +pub struct CodegenContext { + // Resources needed when running LTO + pub backend: B, + pub time_passes: bool, + pub lto: Lto, + pub no_landing_pads: bool, + pub save_temps: bool, + pub fewer_names: bool, + pub exported_symbols: Option>, + pub opts: Arc, + pub crate_types: Vec, + pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, + pub output_filenames: Arc, + pub regular_module_config: Arc, + pub metadata_module_config: Arc, + pub allocator_module_config: Arc, + pub tm_factory: TargetMachineFactory, + pub msvc_imps_needed: bool, + pub target_pointer_width: String, + pub debuginfo: config::DebugInfo, + + // Number of cgus excluding the allocator/metadata modules + pub total_cgus: usize, + // Handler to use for diagnostics produced during codegen. + pub diag_emitter: SharedEmitter, + // LLVM passes added by plugins. + pub plugin_passes: Vec, + // LLVM optimizations for which we want to print remarks. + pub remark: Passes, + // Worker thread number + pub worker: usize, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + pub incr_comp_session_dir: Option, + // Used to update CGU re-use information during the thinlto phase. + pub cgu_reuse_tracker: CguReuseTracker, + // Channel back to the main control thread to send messages to + pub coordinator_send: Sender>, + // A reference to the TimeGraph so we can register timings. None means that + // measuring is disabled. + pub time_graph: Option, + // The assembler command if no_integrated_as option is enabled, None otherwise + pub assembler_cmd: Option> +} + +impl CodegenContext { + pub fn create_diag_handler(&self) -> Handler { + Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + } + + pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { + match kind { + ModuleKind::Regular => &self.regular_module_config, + ModuleKind::Metadata => &self.metadata_module_config, + ModuleKind::Allocator => &self.allocator_module_config, + } + } +} + +fn generate_lto_work( + cgcx: &CodegenContext, + modules: Vec>, + import_only_modules: Vec<(SerializedModule, WorkProduct)> +) -> Vec<(WorkItem, u64)> { + let mut timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + "generate lto") + }).unwrap_or(Timeline::noop()); + let (lto_modules, copy_jobs) = B::run_lto(cgcx, modules, import_only_modules, &mut timeline) + .unwrap_or_else(|e| e.raise()); + + let lto_modules = lto_modules.into_iter().map(|module| { + let cost = module.cost(); + (WorkItem::LTO(module), cost) + }); + + let copy_jobs = copy_jobs.into_iter().map(|wp| { + (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { + name: wp.cgu_name.clone(), + source: wp, + }), 0) + }); + + lto_modules.chain(copy_jobs).collect() +} + +pub struct CompiledModules { + pub modules: Vec, + pub metadata_module: CompiledModule, + pub allocator_module: Option, +} + +fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { + sess.crate_types.borrow().contains(&config::CrateType::Rlib) && + sess.opts.output_types.contains_key(&OutputType::Exe) +} + +fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { + if sess.opts.incremental.is_none() { + return false + } + + match sess.lto() { + Lto::Fat | + 
Lto::No => false, + Lto::Thin | + Lto::ThinLocal => true, + } +} + +pub fn start_async_codegen( + backend: B, + tcx: TyCtxt, + time_graph: Option, + metadata: EncodedMetadata, + coordinator_receive: Receiver>, + total_cgus: usize +) -> OngoingCodegen { + let sess = tcx.sess; + let crate_name = tcx.crate_name(LOCAL_CRATE); + let crate_hash = tcx.crate_hash(LOCAL_CRATE); + let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); + let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let linker_info = LinkerInfo::new(tcx); + let crate_info = CrateInfo::new(tcx); + + // Figure out what we actually need to build. + let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(vec![]); + let mut allocator_config = ModuleConfig::new(vec![]); + + if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { + match *sanitizer { + Sanitizer::Address => { + modules_config.passes.push("asan".to_owned()); + modules_config.passes.push("asan-module".to_owned()); + } + Sanitizer::Memory => { + modules_config.passes.push("msan".to_owned()) + } + Sanitizer::Thread => { + modules_config.passes.push("tsan".to_owned()) + } + _ => {} + } + } + + if sess.opts.debugging_opts.profile { + modules_config.passes.push("insert-gcov-profiling".to_owned()) + } + + modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); + modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); + + modules_config.opt_level = Some(sess.opts.optimize); + modules_config.opt_size = Some(sess.opts.optimize); + + // Save all versions of the bytecode if we're saving our temporaries. + if sess.opts.cg.save_temps { + modules_config.emit_no_opt_bc = true; + modules_config.emit_pre_thin_lto_bc = true; + modules_config.emit_bc = true; + modules_config.emit_lto_bc = true; + metadata_config.emit_bc = true; + allocator_config.emit_bc = true; + } + + // Emit compressed bitcode files for the crate if we're emitting an rlib. + // Whenever an rlib is created, the bitcode is inserted into the archive in + // order to allow LTO against it. + if need_crate_bitcode_for_rlib(sess) { + modules_config.emit_bc_compressed = true; + allocator_config.emit_bc_compressed = true; + } + + modules_config.emit_pre_thin_lto_bc = + need_pre_thin_lto_bitcode_for_incr_comp(sess); + + modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + tcx.sess.target.target.options.no_integrated_as; + + for output_type in sess.opts.output_types.keys() { + match *output_type { + OutputType::Bitcode => { modules_config.emit_bc = true; } + OutputType::LlvmAssembly => { modules_config.emit_ir = true; } + OutputType::Assembly => { + modules_config.emit_asm = true; + // If we're not using the LLVM assembler, this function + // could be invoked specially with output_type_assembly, so + // in this case we still want the metadata object file. 
+ if !sess.opts.output_types.contains_key(&OutputType::Assembly) { + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + } + } + OutputType::Object => { modules_config.emit_obj = true; } + OutputType::Metadata => { metadata_config.emit_obj = true; } + OutputType::Exe => { + modules_config.emit_obj = true; + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + }, + OutputType::Mir => {} + OutputType::DepInfo => {} + } + } + + modules_config.set_flags(sess, no_builtins); + metadata_config.set_flags(sess, no_builtins); + allocator_config.set_flags(sess, no_builtins); + + // Exclude metadata and allocator modules from time_passes output, since + // they throw off the "LLVM passes" measurement. + metadata_config.time_passes = false; + allocator_config.time_passes = false; + + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let (codegen_worker_send, codegen_worker_receive) = channel(); + + let coordinator_thread = start_executing_work(backend.clone(), + tcx, + &crate_info, + shared_emitter, + codegen_worker_send, + coordinator_receive, + total_cgus, + sess.jobserver.clone(), + time_graph.clone(), + Arc::new(modules_config), + Arc::new(metadata_config), + Arc::new(allocator_config)); + + OngoingCodegen { + backend, + crate_name, + crate_hash, + metadata, + windows_subsystem, + linker_info, + crate_info, + + time_graph, + coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), + codegen_worker_receive, + shared_emitter_main, + future: coordinator_thread, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + } +} + +fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( + sess: &Session, + compiled_modules: &CompiledModules, +) -> FxHashMap { + let mut work_products = FxHashMap::default(); + + if sess.opts.incremental.is_none() { + return work_products; + } + + for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { + let mut files = vec![]; + + if let Some(ref path) = module.object { + files.push((WorkProductFileKind::Object, path.clone())); + } + if let Some(ref path) = module.bytecode { + files.push((WorkProductFileKind::Bytecode, path.clone())); + } + if let Some(ref path) = module.bytecode_compressed { + files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); + } + + if let Some((id, product)) = + copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { + work_products.insert(id, product); + } + } + + work_products +} + +fn produce_final_output_artifacts(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + let mut user_wants_bitcode = false; + let mut user_wants_objects = false; + + // Produce final compile outputs. + let copy_gracefully = |from: &Path, to: &Path| { + if let Err(e) = fs::copy(from, to) { + sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); + } + }; + + let copy_if_one_unit = |output_type: OutputType, + keep_numbered: bool| { + if compiled_modules.modules.len() == 1 { + // 1) Only one codegen unit. In this case it's no difficulty + // to copy `foo.0.x` to `foo.x`. + let module_name = Some(&compiled_modules.modules[0].name[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, + &crate_output.path(output_type)); + if !sess.opts.cg.save_temps && !keep_numbered { + // The user just wants `foo.x`, not `foo.#module-name#.x`. 
+ remove(sess, &path); + } + } else { + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); + } else { + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) + } + } + }; + + // Flag to indicate whether the user explicitly requested bitcode. + // Otherwise, we produced it only as a temporary output, and will need + // to get rid of it. + for output_type in crate_output.outputs.keys() { + match *output_type { + OutputType::Bitcode => { + user_wants_bitcode = true; + // Copy to .bc, but always keep the .0.bc. There is a later + // check to figure out if we should delete .0.bc files, or keep + // them for making an rlib. + copy_if_one_unit(OutputType::Bitcode, true); + } + OutputType::LlvmAssembly => { + copy_if_one_unit(OutputType::LlvmAssembly, false); + } + OutputType::Assembly => { + copy_if_one_unit(OutputType::Assembly, false); + } + OutputType::Object => { + user_wants_objects = true; + copy_if_one_unit(OutputType::Object, true); + } + OutputType::Mir | + OutputType::Metadata | + OutputType::Exe | + OutputType::DepInfo => {} + } + } + + // Clean up unwanted temporary files. + + // We create the following files by default: + // - #crate#.#module-name#.bc + // - #crate#.#module-name#.o + // - #crate#.crate.metadata.bc + // - #crate#.crate.metadata.o + // - #crate#.o (linked from crate.##.o) + // - #crate#.bc (copied from crate.##.bc) + // We may create additional files if requested by the user (through + // `-C save-temps` or `--emit=` flags). + + if !sess.opts.cg.save_temps { + // Remove the temporary .#module-name#.o objects. If the user didn't + // explicitly request bitcode (with --emit=bc), and the bitcode is not + // needed for building an rlib, then we must remove .#module-name#.bc as + // well. + + // Specific rules for keeping .#module-name#.bc: + // - If the user requested bitcode (`user_wants_bitcode`), and + // codegen_units > 1, then keep it. + // - If the user requested bitcode but codegen_units == 1, then we + // can toss .#module-name#.bc because we copied it to .bc earlier. + // - If we're not building an rlib and the user didn't request + // bitcode, then delete .#module-name#.bc. + // If you change how this works, also update back::link::link_rlib, + // where .#module-name#.bc files are (maybe) deleted after making an + // rlib. 
+ let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); + + let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; + + let keep_numbered_objects = needs_crate_object || + (user_wants_objects && sess.codegen_units() > 1); + + for module in compiled_modules.modules.iter() { + if let Some(ref path) = module.object { + if !keep_numbered_objects { + remove(sess, path); + } + } + + if let Some(ref path) = module.bytecode { + if !keep_numbered_bitcode { + remove(sess, path); + } + } + } + + if !user_wants_bitcode { + if let Some(ref path) = compiled_modules.metadata_module.bytecode { + remove(sess, &path); + } + + if let Some(ref allocator_module) = compiled_modules.allocator_module { + if let Some(ref path) = allocator_module.bytecode { + remove(sess, path); + } + } + } + } + + // We leave the following files around by default: + // - #crate#.o + // - #crate#.crate.metadata.o + // - #crate#.bc + // These are used in linking steps and will be cleaned up afterward. +} + +pub fn dump_incremental_data(_codegen_results: &CodegenResults) { + // FIXME(mw): This does not work at the moment because the situation has + // become more complicated due to incremental LTO. Now a CGU + // can have more than two caching states. + // println!("[incremental] Re-using {} out of {} modules", + // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), + // codegen_results.modules.len()); +} + +pub enum WorkItem { + /// Optimize a newly codegened, totally unoptimized module. + Optimize(ModuleCodegen), + /// Copy the post-LTO artifacts from the incremental cache to the output + /// directory. + CopyPostLtoArtifacts(CachedModuleCodegen), + /// Perform (Thin)LTO on the given module. + LTO(lto::LtoModuleCodegen), +} + +impl WorkItem { + pub fn module_kind(&self) -> ModuleKind { + match *self { + WorkItem::Optimize(ref m) => m.kind, + WorkItem::CopyPostLtoArtifacts(_) | + WorkItem::LTO(_) => ModuleKind::Regular, + } + } + + pub fn name(&self) -> String { + match *self { + WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), + WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), + WorkItem::LTO(ref m) => format!("lto: {}", m.name()), + } + } +} + +enum WorkItemResult { + Compiled(CompiledModule), + NeedsLTO(ModuleCodegen), +} + +fn execute_work_item( + cgcx: &CodegenContext, + work_item: WorkItem, + timeline: &mut Timeline +) -> Result, FatalError> { + let module_config = cgcx.config(work_item.module_kind()); + + match work_item { + WorkItem::Optimize(module) => { + execute_optimize_work_item(cgcx, module, module_config, timeline) + } + WorkItem::CopyPostLtoArtifacts(module) => { + execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) + } + WorkItem::LTO(module) => { + execute_lto_work_item(cgcx, module, module_config, timeline) + } + } +} + +fn execute_optimize_work_item( + cgcx: &CodegenContext, + module: ModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + B::optimize(cgcx, &diag_handler, &module, module_config, timeline)?; + } + + let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); + + // After we've done the initial round of optimizations we need to + // decide whether to synchronously codegen this module or ship it + // back to the coordinator thread for further LTO processing (which + // has to wait for all the initial modules to be optimized). 
+ // + // Here we dispatch based on the `cgcx.lto` and kind of module we're + // codegenning... + let needs_lto = match cgcx.lto { + Lto::No => false, + + // If the linker does LTO, we don't have to do it. Note that we + // keep doing full LTO, if it is requested, as not to break the + // assumption that the output will be a single module. + Lto::Thin | Lto::ThinLocal if linker_does_lto => false, + + // Here we've got a full crate graph LTO requested. We ignore + // this, however, if the crate type is only an rlib as there's + // no full crate graph to process, that'll happen later. + // + // This use case currently comes up primarily for targets that + // require LTO so the request for LTO is always unconditionally + // passed down to the backend, but we don't actually want to do + // anything about it yet until we've got a final product. + Lto::Fat | Lto::Thin => { + cgcx.crate_types.len() != 1 || + cgcx.crate_types[0] != config::CrateType::Rlib + } + + // When we're automatically doing ThinLTO for multi-codegen-unit + // builds we don't actually want to LTO the allocator modules if + // it shows up. This is due to various linker shenanigans that + // we'll encounter later. + Lto::ThinLocal => { + module.kind != ModuleKind::Allocator + } + }; + + // Metadata modules never participate in LTO regardless of the lto + // settings. + let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; + + if needs_lto { + Ok(WorkItemResult::NeedsLTO(module)) + } else { + let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config, timeline)? }; + Ok(WorkItemResult::Compiled(module)) + } +} + +fn execute_copy_from_cache_work_item( + cgcx: &CodegenContext, + module: CachedModuleCodegen, + module_config: &ModuleConfig, + _: &mut Timeline +) -> Result, FatalError> { + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let mut object = None; + let mut bytecode = None; + let mut bytecode_compressed = None; + for (kind, saved_file) in &module.source.saved_files { + let obj_out = match kind { + WorkProductFileKind::Object => { + let path = cgcx.output_filenames.temp_path(OutputType::Object, + Some(&module.name)); + object = Some(path.clone()); + path + } + WorkProductFileKind::Bytecode => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)); + bytecode = Some(path.clone()); + path + } + WorkProductFileKind::BytecodeCompressed => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)) + .with_extension(RLIB_BYTECODE_EXTENSION); + bytecode_compressed = Some(path.clone()); + path + } + }; + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + module.name, + source_file, + obj_out.display()); + if let Err(err) = link_or_copy(&source_file, &obj_out) { + let diag_handler = cgcx.create_diag_handler(); + diag_handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); + } + } + + assert_eq!(object.is_some(), module_config.emit_obj); + assert_eq!(bytecode.is_some(), module_config.emit_bc); + assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); + + Ok(WorkItemResult::Compiled(CompiledModule { + name: module.name, + kind: ModuleKind::Regular, + object, + bytecode, + bytecode_compressed, + })) +} + +fn execute_lto_work_item( + cgcx: &CodegenContext, + mut module: lto::LtoModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) 
-> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + let module = module.optimize(cgcx, timeline)?; + let module = B::codegen(cgcx, &diag_handler, module, module_config, timeline)?; + Ok(WorkItemResult::Compiled(module)) + } +} + +pub enum Message { + Token(io::Result), + NeedsLTO { + result: ModuleCodegen, + worker_id: usize, + }, + Done { + result: Result, + worker_id: usize, + }, + CodegenDone { + llvm_work_item: WorkItem, + cost: u64, + }, + AddImportOnlyModule { + module_data: SerializedModule, + work_product: WorkProduct, + }, + CodegenComplete, + CodegenItem, + CodegenAborted, +} + +struct Diagnostic { + msg: String, + code: Option, + lvl: Level, +} + +#[derive(PartialEq, Clone, Copy, Debug)] +enum MainThreadWorkerState { + Idle, + Codegenning, + LLVMing, +} + +fn start_executing_work( + backend: B, + tcx: TyCtxt, + crate_info: &CrateInfo, + shared_emitter: SharedEmitter, + codegen_worker_send: Sender>, + coordinator_receive: Receiver>, + total_cgus: usize, + jobserver: Client, + time_graph: Option, + modules_config: Arc, + metadata_config: Arc, + allocator_config: Arc +) -> thread::JoinHandle> { + let coordinator_send = tcx.tx_to_llvm_workers.lock().clone(); + let sess = tcx.sess; + + // Compute the set of symbols we need to retain when doing LTO (if we need to) + let exported_symbols = { + let mut exported_symbols = FxHashMap::default(); + + let copy_symbols = |cnum| { + let symbols = tcx.exported_symbols(cnum) + .iter() + .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) + .collect(); + Arc::new(symbols) + }; + + match sess.lto() { + Lto::No => None, + Lto::ThinLocal => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + Some(Arc::new(exported_symbols)) + } + Lto::Fat | Lto::Thin => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + for &cnum in tcx.crates().iter() { + exported_symbols.insert(cnum, copy_symbols(cnum)); + } + Some(Arc::new(exported_symbols)) + } + } + }; + + // First up, convert our jobserver into a helper thread so we can use normal + // mpsc channels to manage our messages and such. + // After we've requested tokens then we'll, when we can, + // get tokens on `coordinator_receive` which will + // get managed in the main loop below. 
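The comment above describes turning the inherited jobserver into a helper thread whose acquired tokens are forwarded over an ordinary mpsc channel, which is what the following lines implement. A rough standalone sketch of the same pattern with the `jobserver` crate (the fresh `Client::new` here is an assumption for the example; rustc inherits its client from cargo/make):

    use std::sync::mpsc::channel;
    use jobserver::Client;

    fn main() -> std::io::Result<()> {
        let client = Client::new(2)?;            // rustc would inherit this instead
        let (tx, rx) = channel();

        // Every token the helper thread acquires is forwarded as a message,
        // so a coordinator loop can treat it like any other event.
        let helper = client.into_helper_thread(move |token| {
            let _ = tx.send(token);
        })?;

        helper.request_token();
        let token = rx.recv().unwrap().expect("failed to acquire jobserver token");
        // ... spawn a worker while holding `token` ...
        drop(token);                             // releases the token back to the jobserver
        Ok(())
    }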
+ let coordinator_send2 = coordinator_send.clone(); + let helper = jobserver.into_helper_thread(move |token| { + drop(coordinator_send2.send(Box::new(Message::Token::<B>(token)))); + }).expect("failed to spawn helper thread"); + + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { + if link::ignored_for_lto(sess, crate_info, cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + + let assembler_cmd = if modules_config.no_integrated_as { + // HACK: currently we use linker (gcc) as our assembler + let (linker, flavor) = link::linker_and_flavor(sess); + + let (name, mut cmd) = get_linker(sess, &linker, flavor); + cmd.args(&sess.target.target.options.asm_args); + Some(Arc::new(AssemblerCommand { + name, + cmd, + })) + } else { + None + }; + + let cgcx = CodegenContext::<B> { + backend: backend.clone(), + crate_types: sess.crate_types.borrow().clone(), + each_linked_rlib_for_lto, + lto: sess.lto(), + no_landing_pads: sess.no_landing_pads(), + fewer_names: sess.fewer_names(), + save_temps: sess.opts.cg.save_temps, + opts: Arc::new(sess.opts.clone()), + time_passes: sess.time_passes(), + exported_symbols, + plugin_passes: sess.plugin_llvm_passes.borrow().clone(), + remark: sess.opts.cg.remark.clone(), + worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), + cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), + coordinator_send, + diag_emitter: shared_emitter.clone(), + time_graph, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + regular_module_config: modules_config, + metadata_module_config: metadata_config, + allocator_module_config: allocator_config, + tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, false)), + total_cgus, + msvc_imps_needed: msvc_imps_needed(tcx), + target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), + debuginfo: tcx.sess.opts.debuginfo, + assembler_cmd, + }; + + // This is the "main loop" of parallel work happening for parallel codegen. + // It's here that we manage parallelism, schedule work, and work with + // messages coming from clients. + // + // There are a few environmental pre-conditions that shape how the system + // is set up: + // + // - Error reporting can only happen on the main thread because that's the + // only place where we have access to the compiler `Session`. + // - LLVM work can be done on any thread. + // - Codegen can only happen on the main thread. + // - Each thread doing substantial work must be in possession of a `Token` + // from the `Jobserver`. + // - The compiler process always holds one `Token`. Any additional `Tokens` + // have to be requested from the `Jobserver`. + // + // Error Reporting + // =============== + // The error reporting restriction is handled separately from the rest: We + // set up a `SharedEmitter` that holds an open channel to the main thread. + // When an error occurs on any thread, the shared emitter will send the + // error message to the receiving main thread (`SharedEmitterMain`). The + // main thread will periodically query this error message queue and emit + // any error messages it has received. It might even abort compilation if + // it has received a fatal error. In this case we rely on all other threads + // being torn down automatically with the main thread.
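The error-reporting scheme just described boils down to a cloneable sender living on the worker threads and a receiver drained on the main thread. A much-simplified sketch of that shape (the real `SharedEmitter` defined later in this file carries structured diagnostics, not plain strings):

    use std::sync::mpsc::{channel, Receiver, Sender};

    #[derive(Clone)]
    struct SharedEmitter { sender: Sender<String> }          // handed to worker threads

    struct SharedEmitterMain { receiver: Receiver<String> }  // kept on the main thread

    impl SharedEmitter {
        fn error(&self, msg: &str) {
            // Ignore send failures: if the main thread is gone, we are shutting down anyway.
            let _ = self.sender.send(msg.to_string());
        }
    }

    impl SharedEmitterMain {
        // Called periodically from the main thread, in between work packages.
        fn check(&self) {
            while let Ok(msg) = self.receiver.try_recv() {
                eprintln!("error: {}", msg);
            }
        }
    }

    fn main() {
        let (sender, receiver) = channel();
        let (emitter, main) = (SharedEmitter { sender }, SharedEmitterMain { receiver });
        emitter.error("demo diagnostic");
        main.check();
    }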
+ // Since the main thread will often be busy doing codegen work, error + // reporting will be somewhat delayed, since the message queue can only be + // checked in between two work packages. + // + // Work Processing Infrastructure + // ============================== + // The work processing infrastructure knows three major actors: + // + // - the coordinator thread, + // - the main thread, and + // - LLVM worker threads + // + // The coordinator thread is running a message loop. It instructs the main + // thread about what work to do when, and it will spawn off LLVM worker + // threads as open LLVM WorkItems become available. + // + // The job of the main thread is to codegen CGUs into LLVM work packages + // (since the main thread is the only thread that can do this). The main + // thread will block until it receives a message from the coordinator, upon + // which it will codegen one CGU, send it to the coordinator and block + // again. This way the coordinator can control what the main thread is + // doing. + // + // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is + // available, it will spawn off a new LLVM worker thread and let it process + // that WorkItem. When an LLVM worker thread is done with its WorkItem, + // it will just shut down, which also frees all resources associated with + // the given LLVM module, and sends a message to the coordinator that the + // WorkItem has been completed. + // + // Work Scheduling + // =============== + // The scheduler's goal is to minimize the time it takes to complete all + // work there is; however, we also want to keep memory consumption low + // if possible. These two goals are at odds with each other: If memory + // consumption were not an issue, we could just let the main thread produce + // LLVM WorkItems at full speed, assuring maximal utilization of + // Tokens/LLVM worker threads. However, since codegen is usually faster + // than LLVM processing, the queue of LLVM WorkItems would fill up and each + // WorkItem potentially holds on to a substantial amount of memory. + // + // So the actual goal is to always produce just enough LLVM WorkItems so as + // not to starve our LLVM worker threads. That means that once we have enough + // WorkItems in our queue, we can block the main thread so that it does not + // produce more until we need them. + // + // Doing LLVM Work on the Main Thread + // ---------------------------------- + // Since the main thread owns the compiler process's implicit `Token`, it is + // wasteful to keep it blocked without doing any work. Therefore, what we do + // in this case is spawn off an additional LLVM worker thread that helps + // reduce the queue. The work it is doing corresponds to the implicit + // `Token`. The coordinator will mark the main thread as being busy with + // LLVM work. (The actual work happens on another OS thread but we just care + // about `Tokens`, not actual threads). + // + // When any LLVM worker thread finishes while the main thread is marked as + // "busy with LLVM work", we can do a little switcheroo: We give the Token + // of the just-finished thread to the LLVM worker thread that is working on + // behalf of the main thread's implicit Token, thus freeing up the main + // thread again. The coordinator can then again decide what the main thread + // should do. This allows the coordinator to make decisions at more points + // in time.
+ // + // Striking a Balance between Throughput and Memory Consumption + // ------------------------------------------------------------ + // Since our two goals, (1) use as many Tokens as possible and (2) keep + // memory consumption as low as possible, are in conflict with each other, + // we have to find a trade-off between them. Right now, the goal is to keep + // all workers busy, which means that no worker should find the queue empty + // when it is ready to start. + // How do we achieve this? Good question :) We actually never know how + // many `Tokens` are potentially available so it's hard to say how much to + // fill up the queue before switching the main thread to LLVM work. Also we + // currently don't have a means to estimate how long a running LLVM worker + // will still be busy with its current WorkItem. However, we know the + // maximal count of available Tokens that makes sense (=the number of CPU + // cores), so we can take a conservative guess. The heuristic we use here + // is implemented in the `queue_full_enough()` function. + // + // Some Background on Jobservers + // ----------------------------- + // It's worth also touching on the management of parallelism here. We don't + // want to just spawn a thread per work item because, while that's optimal + // parallelism, it may overload a system with too many threads or violate our + // configuration for the maximum amount of CPU to use for this process. To + // manage this we use the `jobserver` crate. + // + // Job servers are an artifact of GNU make and are used to manage + // parallelism between processes. A jobserver is basically a glorified IPC + // semaphore. Whenever we want to run some work we acquire the semaphore, + // and whenever we're done with that work we release the semaphore. In this + // manner we can ensure that the maximum number of parallel workers is + // capped at any one point in time. + // + // LTO and the coordinator thread + // ------------------------------ + // + // The final job the coordinator thread is responsible for is managing LTO + // and how that works. When LTO is requested, what we'll do is collect all + // optimized LLVM modules into a local vector on the coordinator. Once all + // modules have been codegened and optimized we hand this to the `lto` + // module for further optimization. The `lto` module will return a list + // of more modules to work on, which the coordinator will continue to spawn + // work for. + // + // Each LLVM module is automatically sent back to the coordinator for LTO if + // necessary. There are already optimizations in place to avoid sending work + // back to the coordinator if LTO isn't requested. + return thread::spawn(move || { + // We pretend to be within the top-level LLVM time-passes task here: + set_time_depth(1); + + let max_workers = ::num_cpus::get(); + let mut worker_id_counter = 0; + let mut free_worker_ids = Vec::new(); + let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| { + if let Some(id) = free_worker_ids.pop() { + id + } else { + let id = worker_id_counter; + worker_id_counter += 1; + id + } + }; + + // This is where we collect codegen units that have gone all the way + // through codegen and LLVM.
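To make the `queue_full_enough()` heuristic mentioned above concrete, here is the same formula (it appears verbatim later in this file) with a small worked example:

    fn queue_full_enough(items_in_queue: usize,
                         workers_running: usize,
                         max_workers: usize) -> bool {
        items_in_queue > 0 &&
            items_in_queue >= max_workers.saturating_sub(workers_running / 2)
    }

    fn main() {
        // 8 cores with 4 workers already running: the threshold is 8 - 4/2 = 6,
        // so the main thread keeps codegenning until 6 items are queued.
        assert!(!queue_full_enough(5, 4, 8));
        assert!(queue_full_enough(6, 4, 8));
    }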
+ let mut compiled_modules = vec![]; + let mut compiled_metadata_module = None; + let mut compiled_allocator_module = None; + let mut needs_lto = Vec::new(); + let mut lto_import_only_modules = Vec::new(); + let mut started_lto = false; + let mut codegen_aborted = false; + + // This flag tracks whether all items have gone through codegens + let mut codegen_done = false; + + // This is the queue of LLVM work items that still need processing. + let mut work_items = Vec::<(WorkItem, u64)>::new(); + + // This are the Jobserver Tokens we currently hold. Does not include + // the implicit Token the compiler process owns no matter what. + let mut tokens = Vec::new(); + + let mut main_thread_worker_state = MainThreadWorkerState::Idle; + let mut running = 0; + + let mut llvm_start_time = None; + + // Run the message loop while there's still anything that needs message + // processing. Note that as soon as codegen is aborted we simply want to + // wait for all existing work to finish, so many of the conditions here + // only apply if codegen hasn't been aborted as they represent pending + // work to be done. + while !codegen_done || + running > 0 || + (!codegen_aborted && ( + work_items.len() > 0 || + needs_lto.len() > 0 || + lto_import_only_modules.len() > 0 || + main_thread_worker_state != MainThreadWorkerState::Idle + )) + { + + // While there are still CGUs to be codegened, the coordinator has + // to decide how to utilize the compiler processes implicit Token: + // For codegenning more CGU or for running them through LLVM. + if !codegen_done { + if main_thread_worker_state == MainThreadWorkerState::Idle { + if !queue_full_enough(work_items.len(), running, max_workers) { + // The queue is not full enough, codegen more items: + if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { + panic!("Could not send Message::CodegenItem to main thread") + } + main_thread_worker_state = MainThreadWorkerState::Codegenning; + } else { + // The queue is full enough to not let the worker + // threads starve. Use the implicit Token to do some + // LLVM work too. + let (item, _) = work_items.pop() + .expect("queue empty - queue_full_enough() broken?"); + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + } else if codegen_aborted { + // don't queue up any more work if codegen was aborted, we're + // just waiting for our existing children to finish + } else { + // If we've finished everything related to normal codegen + // then it must be the case that we've got some LTO work to do. 
+ // Perform the serial work here of figuring out what we're + // going to LTO and then push a bunch of work items onto our + // queue to do LTO + if work_items.len() == 0 && + running == 0 && + main_thread_worker_state == MainThreadWorkerState::Idle { + assert!(!started_lto); + assert!(needs_lto.len() + lto_import_only_modules.len() > 0); + started_lto = true; + let modules = mem::replace(&mut needs_lto, Vec::new()); + let import_only_modules = + mem::replace(&mut lto_import_only_modules, Vec::new()); + for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { + let insertion_index = work_items + .binary_search_by_key(&cost, |&(_, cost)| cost) + .unwrap_or_else(|e| e); + work_items.insert(insertion_index, (work, cost)); + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + } + } + + // In this branch, we know that everything has been codegened, + // so it's just a matter of determining whether the implicit + // Token is free to use for LLVM work. + match main_thread_worker_state { + MainThreadWorkerState::Idle => { + if let Some((item, _)) = work_items.pop() { + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } else { + // There is no unstarted work, so let the main thread + // take over for a running worker. Otherwise the + // implicit token would just go to waste. + // We reduce the `running` counter by one. The + // `tokens.truncate()` below will take care of + // giving the Token back. + debug_assert!(running > 0); + running -= 1; + main_thread_worker_state = MainThreadWorkerState::LLVMing; + } + } + MainThreadWorkerState::Codegenning => { + bug!("codegen worker should not be codegenning after \ + codegen was already completed") + } + MainThreadWorkerState::LLVMing => { + // Already making good use of that token + } + } + } + + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while !codegen_aborted && work_items.len() > 0 && running < tokens.len() { + let (item, _) = work_items.pop().unwrap(); + + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + + spawn_work(cgcx, item); + running += 1; + } + + // Relinquish accidentally acquired extra tokens + tokens.truncate(running); + + let msg = coordinator_receive.recv().unwrap(); + match *msg.downcast::>().ok().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. + Message::Token(token) => { + match token { + Ok(token) => { + tokens.push(token); + + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to codegen demand. 
+ main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } + } + Err(e) => { + let msg = &format!("failed to acquire jobserver token: {}", e); + shared_emitter.fatal(msg); + // Exit the coordinator thread + panic!("{}", msg) + } + } + } + + Message::CodegenDone { llvm_work_item, cost } => { + // We keep the queue sorted by estimated processing cost, + // so that more expensive items are processed earlier. This + // is good for throughput as it gives the main thread more + // time to fill up the queue and it avoids scheduling + // expensive items to the end. + // Note, however, that this is not ideal for memory + // consumption, as LLVM module sizes are not evenly + // distributed. + let insertion_index = + work_items.binary_search_by_key(&cost, |&(_, cost)| cost); + let insertion_index = match insertion_index { + Ok(idx) | Err(idx) => idx + }; + work_items.insert(insertion_index, (llvm_work_item, cost)); + + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + assert!(!codegen_aborted); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + Message::CodegenComplete => { + codegen_done = true; + assert!(!codegen_aborted); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + // If codegen is aborted that means translation was aborted due + // to some normal-ish compiler error. In this situation we want + // to exit as soon as possible, but we want to make sure all + // existing work has finished. Flag codegen as being done, and + // then conditions above will ensure no more work is spawned but + // we'll keep executing this loop until `running` hits 0. + Message::CodegenAborted => { + assert!(!codegen_aborted); + codegen_done = true; + codegen_aborted = true; + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + } + + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. 
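The `binary_search_by_key` dance above keeps the work queue sorted by estimated cost, so `pop()` always yields the most expensive pending item. Isolated, the idiom looks like this (item type and costs are made up for illustration):

    fn insert_by_cost<T>(queue: &mut Vec<(T, u64)>, item: T, cost: u64) {
        // Ok(idx) means an equal cost already exists, Err(idx) is the insertion
        // point that keeps the vector sorted; either index works for insert().
        let idx = match queue.binary_search_by_key(&cost, |&(_, c)| c) {
            Ok(idx) | Err(idx) => idx,
        };
        queue.insert(idx, (item, cost));
    }

    fn main() {
        let mut queue = Vec::new();
        insert_by_cost(&mut queue, "cgu.small", 10);
        insert_by_cost(&mut queue, "cgu.big", 500);
        insert_by_cost(&mut queue, "cgu.medium", 120);
        assert_eq!(queue.pop().unwrap().0, "cgu.big"); // highest cost is processed first
    }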
+ Message::Done { result: Ok(compiled_module), worker_id } => { + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + + match compiled_module.kind { + ModuleKind::Regular => { + compiled_modules.push(compiled_module); + } + ModuleKind::Metadata => { + assert!(compiled_metadata_module.is_none()); + compiled_metadata_module = Some(compiled_module); + } + ModuleKind::Allocator => { + assert!(compiled_allocator_module.is_none()); + compiled_allocator_module = Some(compiled_module); + } + } + } + Message::NeedsLTO { result, worker_id } => { + assert!(!started_lto); + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + free_worker_ids.push(worker_id); + needs_lto.push(result); + } + Message::AddImportOnlyModule { module_data, work_product } => { + assert!(!started_lto); + assert!(!codegen_done); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + lto_import_only_modules.push((module_data, work_product)); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + Message::Done { result: Err(()), worker_id: _ } => { + bug!("worker thread panicked"); + } + Message::CodegenItem => { + bug!("the coordinator should not receive codegen requests") + } + } + } + + if let Some(llvm_start_time) = llvm_start_time { + let total_llvm_time = Instant::now().duration_since(llvm_start_time); + // This is the top-level timing for all of LLVM, set the time-depth + // to zero. + set_time_depth(0); + print_time_passes_entry(cgcx.time_passes, + "LLVM passes", + total_llvm_time); + } + + // Regardless of what order these modules completed in, report them to + // the backend in the same order every time to ensure that we're handing + // out deterministic results. + compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); + + let compiled_metadata_module = compiled_metadata_module + .expect("Metadata module not compiled?"); + + Ok(CompiledModules { + modules: compiled_modules, + metadata_module: compiled_metadata_module, + allocator_module: compiled_allocator_module, + }) + }); + + // A heuristic that determines if we have enough LLVM WorkItems in the + // queue so that the main thread can do LLVM work instead of codegen + fn queue_full_enough(items_in_queue: usize, + workers_running: usize, + max_workers: usize) -> bool { + // Tune me, plz. + items_in_queue > 0 && + items_in_queue >= max_workers.saturating_sub(workers_running / 2) + } + + fn maybe_start_llvm_timer(config: &ModuleConfig, + llvm_start_time: &mut Option) { + // We keep track of the -Ztime-passes output manually, + // since the closure-based interface does not fit well here. 
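The `spawn_work` function coming up uses a drop guard (the `Bomb` struct) so the coordinator always hears back from a worker exactly once, even if the worker panics. A stripped-down sketch of that pattern, with plain strings standing in for compiled modules:

    use std::sync::mpsc::{channel, Sender};
    use std::thread;

    struct Bomb {
        coordinator: Sender<Result<String, ()>>,
        result: Option<String>,
    }

    impl Drop for Bomb {
        fn drop(&mut self) {
            // `None` means the work never produced a result, i.e. the worker panicked.
            let _ = self.coordinator.send(self.result.take().ok_or(()));
        }
    }

    fn worker(coordinator: Sender<Result<String, ()>>) {
        let mut bomb = Bomb { coordinator, result: None };
        // ... do the actual work ...
        bomb.result = Some("compiled module".to_string());
        // Bomb::drop fires here (or on unwind) and reports the outcome.
    }

    fn main() {
        let (tx, rx) = channel();
        thread::spawn(move || worker(tx)).join().unwrap();
        assert_eq!(rx.recv().unwrap(), Ok("compiled module".to_string()));
    }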
+ if config.time_passes { + if llvm_start_time.is_none() { + *llvm_start_time = Some(Instant::now()); + } + } + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); +const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); + +fn spawn_work( + cgcx: CodegenContext, + work: WorkItem +) { + let depth = time_depth(); + + thread::spawn(move || { + set_time_depth(depth); + + // Set up a destructor which will fire off a message that we're done as + // we exit. + struct Bomb { + coordinator_send: Sender>, + result: Option>, + worker_id: usize, + } + impl Drop for Bomb { + fn drop(&mut self) { + let worker_id = self.worker_id; + let msg = match self.result.take() { + Some(WorkItemResult::Compiled(m)) => { + Message::Done:: { result: Ok(m), worker_id } + } + Some(WorkItemResult::NeedsLTO(m)) => { + Message::NeedsLTO:: { result: m, worker_id } + } + None => Message::Done:: { result: Err(()), worker_id } + }; + drop(self.coordinator_send.send(Box::new(msg))); + } + } + + let mut bomb = Bomb:: { + coordinator_send: cgcx.coordinator_send.clone(), + result: None, + worker_id: cgcx.worker, + }; + + // Execute the work itself, and if it finishes successfully then flag + // ourselves as a success as well. + // + // Note that we ignore any `FatalError` coming out of `execute_work_item`, + // as a diagnostic was already sent off to the main thread - just + // surface that there was an error in this worker. + bomb.result = { + let timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND, + &work.name()) + }); + let mut timeline = timeline.unwrap_or(Timeline::noop()); + execute_work_item(&cgcx, work, &mut timeline).ok() + }; + }); +} + +pub fn run_assembler( + cgcx: &CodegenContext, + handler: &Handler, + assembly: &Path, + object: &Path +) { + let assembler = cgcx.assembler_cmd + .as_ref() + .expect("cgcx.assembler_cmd is missing?"); + + let pname = &assembler.name; + let mut cmd = assembler.cmd.clone(); + cmd.arg("-c").arg("-o").arg(object).arg(assembly); + debug!("{:?}", cmd); + + match cmd.output() { + Ok(prog) => { + if !prog.status.success() { + let mut note = prog.stderr.clone(); + note.extend_from_slice(&prog.stdout); + + handler.struct_err(&format!("linking with `{}` failed: {}", + pname.display(), + prog.status)) + .note(&format!("{:?}", &cmd)) + .note(str::from_utf8(¬e[..]).unwrap()) + .emit(); + handler.abort_if_errors(); + } + }, + Err(e) => { + handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e)); + handler.abort_if_errors(); + } + } +} + + +enum SharedEmitterMessage { + Diagnostic(Diagnostic), + InlineAsmError(u32, String), + AbortIfErrors, + Fatal(String), +} + +#[derive(Clone)] +pub struct SharedEmitter { + sender: Sender, +} + +pub struct SharedEmitterMain { + receiver: Receiver, +} + +impl SharedEmitter { + pub fn new() -> (SharedEmitter, SharedEmitterMain) { + let (sender, receiver) = channel(); + + (SharedEmitter { sender }, SharedEmitterMain { receiver }) + } + + pub fn inline_asm_error(&self, cookie: u32, msg: String) { + drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); + } + + pub fn fatal(&self, msg: &str) { + 
drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); + } +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); + } + drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); + } +} + +impl SharedEmitterMain { + pub fn check(&self, sess: &Session, blocking: bool) { + loop { + let message = if blocking { + match self.receiver.recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + } else { + match self.receiver.try_recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + }; + + match message { + Ok(SharedEmitterMessage::Diagnostic(diag)) => { + let handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + code.clone(), + diag.lvl); + } + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), + } + } + Ok(SharedEmitterMessage::AbortIfErrors) => { + sess.abort_if_errors(); + } + Ok(SharedEmitterMessage::Fatal(msg)) => { + sess.fatal(&msg); + } + Err(_) => { + break; + } + } + + } + } +} + +pub struct OngoingCodegen { + pub backend: B, + pub crate_name: Symbol, + pub crate_hash: Svh, + pub metadata: EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: LinkerInfo, + pub crate_info: CrateInfo, + pub time_graph: Option, + pub coordinator_send: Sender>, + pub codegen_worker_receive: Receiver>, + pub shared_emitter_main: SharedEmitterMain, + pub future: thread::JoinHandle>, + pub output_filenames: Arc, +} + +impl OngoingCodegen { + pub fn join( + self, + sess: &Session + ) -> (CodegenResults, FxHashMap) { + self.shared_emitter_main.check(sess, true); + let compiled_modules = match self.future.join() { + Ok(Ok(compiled_modules)) => compiled_modules, + Ok(Err(())) => { + sess.abort_if_errors(); + panic!("expected abort due to worker thread errors") + }, + Err(_) => { + bug!("panic during codegen/LLVM phase"); + } + }; + + sess.cgu_reuse_tracker.check_expected_reuse(sess); + + sess.abort_if_errors(); + + if let Some(time_graph) = self.time_graph { + time_graph.dump(&format!("{}-timings", self.crate_name)); + } + + let work_products = + copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, + &compiled_modules); + produce_final_output_artifacts(sess, + &compiled_modules, + &self.output_filenames); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? 
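`SharedEmitterMain::check` above is the one place where the main thread drains worker diagnostics, blocking only when the caller can afford to wait (as `join` does below). The recv/try_recv split reduces to a helper like this (a sketch, not the actual rustc code):

    use std::sync::mpsc::{channel, Receiver};

    fn next_message<T>(receiver: &Receiver<T>, blocking: bool) -> Option<T> {
        if blocking {
            receiver.recv().ok()       // wait for the next message (or channel close)
        } else {
            receiver.try_recv().ok()   // only drain what is already queued
        }
    }

    fn main() {
        let (tx, rx) = channel();
        tx.send("diagnostic").unwrap();
        assert_eq!(next_message(&rx, false), Some("diagnostic"));
        assert_eq!(next_message(&rx, false), None); // nothing queued, don't block
    }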
+ if sess.codegen_units() == 1 && sess.time_llvm_passes() { + self.backend.print_pass_timings() + } + + (CodegenResults { + crate_name: self.crate_name, + crate_hash: self.crate_hash, + metadata: self.metadata, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + crate_info: self.crate_info, + + modules: compiled_modules.modules, + allocator_module: compiled_modules.allocator_module, + metadata_module: compiled_modules.metadata_module, + }, work_products) + } + + pub fn submit_pre_codegened_module_to_llvm(&self, + tcx: TyCtxt, + module: ModuleCodegen) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + + // These are generally cheap and won't through off scheduling. + let cost = 0; + submit_codegened_module_to_llvm(&self.backend, tcx, module, cost); + } + + pub fn codegen_finished(&self, tcx: TyCtxt) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::))); + } + + /// Consume this context indicating that codegen was entirely aborted, and + /// we need to exit as quickly as possible. + /// + /// This method blocks the current thread until all worker threads have + /// finished, and all worker threads should have exited or be real close to + /// exiting at this point. + pub fn codegen_aborted(self) { + // Signal to the coordinator it should spawn no more work and start + // shutdown. + drop(self.coordinator_send.send(Box::new(Message::CodegenAborted::))); + drop(self.future.join()); + } + + pub fn check_for_errors(&self, sess: &Session) { + self.shared_emitter_main.check(sess, false); + } + + pub fn wait_for_signal_to_codegen_item(&self) { + match self.codegen_worker_receive.recv() { + Ok(Message::CodegenItem) => { + // Nothing to do + } + Ok(_) => panic!("unexpected message"), + Err(_) => { + // One of the LLVM threads must have panicked, fall through so + // error handling can be reached. + } + } + } +} + +pub fn submit_codegened_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: ModuleCodegen, + cost: u64 +) { + let llvm_work_item = WorkItem::Optimize(module); + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone:: { + llvm_work_item, + cost, + }))); +} + +pub fn submit_post_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone:: { + llvm_work_item, + cost: 0, + }))); +} + +pub fn submit_pre_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let filename = pre_lto_bitcode_filename(&module.name); + let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); + let file = fs::File::open(&bc_path).unwrap_or_else(|e| { + panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) + }); + + let mmap = unsafe { + memmap::Mmap::map(&file).unwrap_or_else(|e| { + panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) + }) + }; + // Schedule the module to be loaded + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::AddImportOnlyModule:: { + module_data: SerializedModule::FromUncompressedFile(mmap), + work_product: module.source, + }))); +} + +pub fn pre_lto_bitcode_filename(module_name: &str) -> String { + format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) +} + +fn msvc_imps_needed(tcx: TyCtxt) -> bool { + // This should never be true (because it's not supported). 
If it is true, + // something is wrong with command-line arg validation. + assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.opts.cg.prefer_dynamic)); + + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() +} diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs new file mode 100644 index 0000000000000000000000000000000000000000..856bb9533c859013c4cc23b55bd93f577a4e973a --- /dev/null +++ b/src/librustc_codegen_ssa/base.rs @@ -0,0 +1,985 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Codegen the completed AST to the LLVM IR. +//! +//! Some functions here, such as codegen_block and codegen_expr, return a value -- +//! the result of the codegen to LLVM -- while others, such as codegen_fn +//! and mono_item, are called only for the side effect of adding a +//! particular definition to the LLVM IR output we're producing. +//! +//! Hopefully useful general knowledge about codegen: +//! +//! * There's no way to find out the Ty type of a Value. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty, +//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int, +//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type.
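The `StatRecorder` type defined a little further down uses a classic RAII shape: take a measurement on construction and report the delta in `Drop`, so every exit path of the measured function is covered. A generic sketch of that pattern (measuring wall-clock time here instead of LLVM instruction counts, purely for illustration):

    use std::time::Instant;

    struct ScopeStat {
        name: String,
        start: Instant,
    }

    impl ScopeStat {
        fn new(name: &str) -> Self {
            ScopeStat { name: name.to_string(), start: Instant::now() }
        }
    }

    impl Drop for ScopeStat {
        fn drop(&mut self) {
            // Runs on every exit path, including early returns and unwinding.
            eprintln!("{} took {:?}", self.name, self.start.elapsed());
        }
    }

    fn main() {
        let _stats = ScopeStat::new("codegen of some function");
        // ... do the work being measured ...
    }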
+ +use {ModuleCodegen, ModuleKind, CachedModuleCodegen}; + +use rustc::dep_graph::cgu_reuse_tracker::CguReuse; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::middle::lang_items::StartFnLangItem; +use rustc::middle::weak_lang_items; +use rustc::mir::mono::{Stats, CodegenUnitNameBuilder}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt}; +use rustc::ty::query::Providers; +use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::util::common::{time, print_time_passes_entry}; +use rustc::util::profiling::ProfileCategory; +use rustc::session::config::{self, EntryFnType, Lto}; +use rustc::session::Session; +use mir::place::PlaceRef; +use back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm, + submit_post_lto_module_to_llvm}; +use {MemFlags, CrateInfo}; +use callee; +use rustc_mir::monomorphize::item::DefPathBasedNames; +use common::{RealPredicate, TypeKind, IntPredicate}; +use meth; +use mir; +use rustc::util::time_graph; +use rustc_mir::monomorphize::Instance; +use rustc_mir::monomorphize::partitioning::{CodegenUnit, CodegenUnitExt}; +use mono_item::MonoItem; +use rustc::util::nodemap::FxHashMap; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::sync::Lrc; +use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr}; +use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; + +use traits::*; + +use std::any::Any; +use std::cmp; +use std::ops::{Deref, DerefMut}; +use std::time::{Instant, Duration}; +use std::sync::mpsc; +use syntax_pos::Span; +use syntax::attr; +use rustc::hir; + +use mir::operand::OperandValue; + +use std::marker::PhantomData; + +pub struct StatRecorder<'a, 'tcx, Cx: 'a + CodegenMethods<'tcx>> { + cx: &'a Cx, + name: Option, + istart: usize, + _marker: PhantomData<&'tcx ()>, +} + +impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> StatRecorder<'a, 'tcx, Cx> { + pub fn new(cx: &'a Cx, name: String) -> Self { + let istart = cx.stats().borrow().n_llvm_insns; + StatRecorder { + cx, + name: Some(name), + istart, + _marker: PhantomData, + } + } +} + +impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> Drop for StatRecorder<'a, 'tcx, Cx> { + fn drop(&mut self) { + if self.cx.sess().codegen_stats() { + let mut stats = self.cx.stats().borrow_mut(); + let iend = stats.n_llvm_insns; + stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); + stats.n_fns += 1; + // Reset LLVM insn count to avoid compound costs. 
+ stats.n_llvm_insns = self.istart; + } + } +} + +pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, + signed: bool) + -> IntPredicate { + match op { + hir::BinOpKind::Eq => IntPredicate::IntEQ, + hir::BinOpKind::Ne => IntPredicate::IntNE, + hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT }, + hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE }, + hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT }, + hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE }, + op => { + bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ + found {:?}", + op) + } + } +} + +pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { + match op { + hir::BinOpKind::Eq => RealPredicate::RealOEQ, + hir::BinOpKind::Ne => RealPredicate::RealUNE, + hir::BinOpKind::Lt => RealPredicate::RealOLT, + hir::BinOpKind::Le => RealPredicate::RealOLE, + hir::BinOpKind::Gt => RealPredicate::RealOGT, + hir::BinOpKind::Ge => RealPredicate::RealOGE, + op => { + bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ + found {:?}", + op); + } + } +} + +pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + lhs: Bx::Value, + rhs: Bx::Value, + t: Ty<'tcx>, + ret_ty: Bx::Type, + op: hir::BinOpKind +) -> Bx::Value { + let signed = match t.sty { + ty::Float(_) => { + let cmp = bin_op_to_fcmp_predicate(op); + let cmp = bx.fcmp(cmp, lhs, rhs); + return bx.sext(cmp, ret_ty); + }, + ty::Uint(_) => false, + ty::Int(_) => true, + _ => bug!("compare_simd_types: invalid SIMD type"), + }; + + let cmp = bin_op_to_icmp_predicate(op, signed); + let cmp = bx.icmp(cmp, lhs, rhs); + // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension + // to get the correctly sized type. This will compile to a single instruction + // once the IR is converted to assembly if the SIMD instruction is supported + // by the target architecture. + bx.sext(cmp, ret_ty) +} + +/// Retrieve the information we are losing (making dynamic) in an unsizing +/// adjustment. +/// +/// The `old_info` argument is a bit funny. It is intended for use +/// in an upcast, where the new vtable for an object will be derived +/// from the old one. +pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>( + cx: &Cx, + source: Ty<'tcx>, + target: Ty<'tcx>, + old_info: Option, +) -> Cx::Value { + let (source, target) = cx.tcx().struct_lockstep_tails(source, target); + match (&source.sty, &target.sty) { + (&ty::Array(_, len), &ty::Slice(_)) => { + cx.const_usize(len.unwrap_usize(cx.tcx())) + } + (&ty::Dynamic(..), &ty::Dynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + old_info.expect("unsized_info: missing old info for trait upcast") + } + (_, &ty::Dynamic(ref data, ..)) => { + let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) + .field(cx, FAT_PTR_EXTRA); + cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()), + cx.backend_type(vtable_ptr)) + } + _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", + source, + target), + } +} + +/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
+pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + src: Bx::Value, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx> +) -> (Bx::Value, Bx::Value) { + debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(_, a, _), + &ty::Ref(_, b, _)) | + (&ty::Ref(_, a, _), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | + (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + let src_layout = bx.cx().layout_of(src_ty); + let dst_layout = bx.cx().layout_of(dst_ty); + let mut result = None; + for i in 0..src_layout.fields.count() { + let src_f = src_layout.field(bx.cx(), i); + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + if src_f.is_zst() { + continue; + } + assert_eq!(src_layout.size, src_f.size); + + let dst_f = dst_layout.field(bx.cx(), i); + assert_ne!(src_f.ty, dst_f.ty); + assert_eq!(result, None); + result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); + } + let (lldata, llextra) = result.unwrap(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)), + bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true))) + } + _ => bug!("unsize_thin_ptr: called on bad types"), + } +} + +/// Coerce `src`, which is a reference to a value of type `src_ty`, +/// to a value of type `dst_ty` and store the result in `dst` +pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + src: PlaceRef<'tcx, Bx::Value>, + dst: PlaceRef<'tcx, Bx::Value> +) { + let src_ty = src.layout.ty; + let dst_ty = dst.layout.ty; + let mut coerce_ptr = || { + let (base, info) = match bx.load_operand(src).val { + OperandValue::Pair(base, info) => { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. &'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. + let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR); + (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info) + } + OperandValue::Immediate(base) => { + unsize_thin_ptr(bx, base, src_ty, dst_ty) + } + OperandValue::Ref(..) 
=> bug!() + }; + OperandValue::Pair(base, info).store(bx, dst); + }; + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(..), &ty::Ref(..)) | + (&ty::Ref(..), &ty::RawPtr(..)) | + (&ty::RawPtr(..), &ty::RawPtr(..)) => { + coerce_ptr() + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + coerce_ptr() + } + + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() { + let src_f = src.project_field(bx, i); + let dst_f = dst.project_field(bx, i); + + if dst_f.layout.is_zst() { + continue; + } + + if src_f.layout.ty == dst_f.layout.ty { + memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align, + src_f.layout, MemFlags::empty()); + } else { + coerce_unsized_into(bx, src_f, dst_f); + } + } + } + _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", + src_ty, + dst_ty), + } +} + +pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + op: hir::BinOpKind, + lhs: Bx::Value, + rhs: Bx::Value +) -> Bx::Value { + cast_shift_rhs(bx, op, lhs, rhs) +} + +fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + op: hir::BinOpKind, + lhs: Bx::Value, + rhs: Bx::Value, +) -> Bx::Value { + // Shifts may have any size int on the rhs + if op.is_shift() { + let mut rhs_llty = bx.cx().val_ty(rhs); + let mut lhs_llty = bx.cx().val_ty(lhs); + if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { + rhs_llty = bx.cx().element_type(rhs_llty) + } + if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { + lhs_llty = bx.cx().element_type(lhs_llty) + } + let rhs_sz = bx.cx().int_width(rhs_llty); + let lhs_sz = bx.cx().int_width(lhs_llty); + if lhs_sz < rhs_sz { + bx.trunc(rhs, lhs_llty) + } else if lhs_sz > rhs_sz { + // FIXME (#1877: If in the future shifting by negative + // values is no longer undefined then this is wrong. + bx.zext(rhs, lhs_llty) + } else { + rhs + } + } else { + rhs + } +} + +/// Returns whether this session's target will use SEH-based unwinding. +/// +/// This is only true for MSVC targets, and even then the 64-bit MSVC target +/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as +/// 64-bit MinGW) instead of "full SEH". 
+pub fn wants_msvc_seh(sess: &Session) -> bool { + sess.target.target.options.is_like_msvc +} + +pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + val: Bx::Value +) { + let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); + bx.call(assume_intrinsic, &[val], None); +} + +pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + val: Bx::Value +) -> Bx::Value { + if bx.cx().val_ty(val) == bx.cx().type_i1() { + bx.zext(val, bx.cx().type_i8()) + } else { + val + } +} + +pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + val: Bx::Value, + layout: layout::TyLayout, +) -> Bx::Value { + if let layout::Abi::Scalar(ref scalar) = layout.abi { + return to_immediate_scalar(bx, val, scalar); + } + val +} + +pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + val: Bx::Value, + scalar: &layout::Scalar, +) -> Bx::Value { + if scalar.is_bool() { + return bx.trunc(val, bx.cx().type_i1()); + } + val +} + +pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + dst: Bx::Value, + dst_align: Align, + src: Bx::Value, + src_align: Align, + layout: TyLayout<'tcx>, + flags: MemFlags, +) { + let size = layout.size.bytes(); + if size == 0 { + return; + } + + bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags); +} + +pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + instance: Instance<'tcx>, +) { + let _s = if cx.sess().codegen_stats() { + let mut instance_name = String::new(); + DefPathBasedNames::new(cx.tcx(), true, true) + .push_def_path(instance.def_id(), &mut instance_name); + Some(StatRecorder::new(cx, instance_name)) + } else { + None + }; + + // this is an info! to allow collecting monomorphization statistics + // and to allow finding the last function before LLVM aborts from + // release builds. + info!("codegen_instance({})", instance); + + let sig = instance.fn_sig(cx.tcx()); + let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + + let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(|| + bug!("Instance `{:?}` not already declared", instance)); + + cx.stats().borrow_mut().n_closures += 1; + + let mir = cx.tcx().instance_mir(instance.def); + mir::codegen_mir::(cx, lldecl, &mir, instance, sig); +} + +/// Create the `main` function which will initialize the rust runtime and call +/// users main function. +pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx +) { + let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { + Some((id, span, _)) => { + (cx.tcx().hir.local_def_id(id), span) + } + None => return, + }; + + let instance = Instance::mono(cx.tcx(), main_def_id); + + if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) { + // We want to create the wrapper in the same codegen unit as Rust's main + // function. + return; + } + + let main_llfn = cx.get_fn(instance); + + let et = cx.sess().entry_fn.get().map(|e| e.2); + match et { + Some(EntryFnType::Main) => create_entry_fn::(cx, span, main_llfn, main_def_id, true), + Some(EntryFnType::Start) => create_entry_fn::(cx, span, main_llfn, main_def_id, false), + None => {} // Do nothing. 
+ } + + fn create_entry_fn<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + sp: Span, + rust_main: Bx::Value, + rust_main_def_id: DefId, + use_start_lang_item: bool, + ) { + let llfty = + cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); + + let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output(); + // Given that `main()` has no arguments, + // then its return type cannot have + // late-bound regions, since late-bound + // regions must appear in the argument + // listing. + let main_ret_ty = cx.tcx().erase_regions( + &main_ret_ty.no_bound_vars().unwrap(), + ); + + if cx.get_defined_value("main").is_some() { + // FIXME: We should be smart and show a better diagnostic here. + cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") + .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") + .emit(); + cx.sess().abort_if_errors(); + bug!(); + } + let llfn = cx.declare_cfn("main", llfty); + + // `main` should respect same config for frame pointer elimination as rest of code + cx.set_frame_pointer_elimination(llfn); + cx.apply_target_cpu_attr(llfn); + + let mut bx = Bx::new_block(&cx, llfn, "top"); + + bx.insert_reference_to_gdb_debug_scripts_section_global(); + + // Params from native main() used as args for rust start function + let param_argc = cx.get_param(llfn, 0); + let param_argv = cx.get_param(llfn, 1); + let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); + let arg_argv = param_argv; + + let (start_fn, args) = if use_start_lang_item { + let start_def_id = cx.tcx().require_lang_item(StartFnLangItem); + let start_fn = callee::resolve_and_get_fn( + cx, + start_def_id, + cx.tcx().intern_substs(&[main_ret_ty.into()]), + ); + (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), + arg_argc, arg_argv]) + } else { + debug!("using user-defined start fn"); + (rust_main, vec![arg_argc, arg_argv]) + }; + + let result = bx.call(start_fn, &args, None); + let cast = bx.intcast(result, cx.type_int(), true); + bx.ret(cast); + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); + + +pub fn codegen_crate( + backend: B, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + rx: mpsc::Receiver> +) -> OngoingCodegen { + + check_for_rustc_errors_attr(tcx); + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); + + // Codegen the metadata. + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); + + let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("metadata")).as_str() + .to_string(); + let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name); + let metadata = time(tcx.sess, "write metadata", || { + backend.write_metadata(tcx, &metadata_llvm_module) + }); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); + + let metadata_module = ModuleCodegen { + name: metadata_cgu_name, + module_llvm: metadata_llvm_module, + kind: ModuleKind::Metadata, + }; + + let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { + Some(time_graph::TimeGraph::new()) + } else { + None + }; + + // Skip crate items and just output metadata in -Z no-codegen mode. 
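+    // In that case the code below still spins up the write-back machinery (with
+    // a module count of 1), hands it only the metadata module, and returns early
+    // without ever running the monomorphization collector.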
+ if tcx.sess.opts.debugging_opts.no_codegen || + !tcx.sess.opts.output_types.should_codegen() { + let ongoing_codegen = start_async_codegen( + backend, + tcx, + time_graph, + metadata, + rx, + 1); + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + ongoing_codegen.codegen_finished(tcx); + + assert_and_save_dep_graph(tcx); + + ongoing_codegen.check_for_errors(tcx.sess); + + return ongoing_codegen; + } + + // Run the monomorphization collector and partition the collected items into + // codegen units. + let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; + let codegen_units = (*codegen_units).clone(); + + // Force all codegen_unit queries so they are already either red or green + // when compile_codegen_unit accesses them. We are not able to re-execute + // the codegen_unit query from just the DepNode, so an unknown color would + // lead to having to re-execute compile_codegen_unit, possibly + // unnecessarily. + if tcx.dep_graph.is_fully_enabled() { + for cgu in &codegen_units { + tcx.codegen_unit(cgu.name().clone()); + } + } + + let ongoing_codegen = start_async_codegen( + backend.clone(), + tcx, + time_graph.clone(), + metadata, + rx, + codegen_units.len()); + let ongoing_codegen = AbortCodegenOnDrop::(Some(ongoing_codegen)); + + // Codegen an allocator shim, if necessary. + // + // If the crate doesn't have an `allocator_kind` set then there's definitely + // no shim to generate. Otherwise we also check our dependency graph for all + // our output crate types. If anything there looks like its a `Dynamic` + // linkage, then it's already got an allocator shim and we'll be using that + // one instead. If nothing exists then it's our job to generate the + // allocator! + let any_dynamic_crate = tcx.sess.dependency_formats.borrow() + .iter() + .any(|(_, list)| { + use rustc::middle::dependency_format::Linkage; + list.iter().any(|&linkage| linkage == Linkage::Dynamic) + }); + let allocator_module = if any_dynamic_crate { + None + } else if let Some(kind) = *tcx.sess.allocator_kind.get() { + let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("allocator")).as_str() + .to_string(); + let modules = backend.new_metadata(tcx.sess, &llmod_id); + time(tcx.sess, "write allocator module", || { + backend.codegen_allocator(tcx, &modules, kind) + }); + + Some(ModuleCodegen { + name: llmod_id, + module_llvm: modules, + kind: ModuleKind::Allocator, + }) + } else { + None + }; + + if let Some(allocator_module) = allocator_module { + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); + } + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + + // We sort the codegen units by size. This way we can schedule work for LLVM + // a bit more efficiently. 
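+    // (The sort key below is `cmp::Reverse(size_estimate)`, so the largest CGUs
+    // are queued first and the longest-running LLVM jobs are handed to the
+    // worker threads as early as possible.)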
+ let codegen_units = { + let mut codegen_units = codegen_units; + codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); + codegen_units + }; + + let mut total_codegen_time = Duration::new(0, 0); + let mut all_stats = Stats::default(); + + for cgu in codegen_units.into_iter() { + ongoing_codegen.wait_for_signal_to_codegen_item(); + ongoing_codegen.check_for_errors(tcx.sess); + + let cgu_reuse = determine_cgu_reuse(tcx, &cgu); + tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); + + match cgu_reuse { + CguReuse::No => { + let _timing_guard = time_graph.as_ref().map(|time_graph| { + time_graph.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + &format!("codegen {}", cgu.name())) + }); + let start_time = Instant::now(); + let stats = backend.compile_codegen_unit(tcx, *cgu.name()); + all_stats.extend(stats); + total_codegen_time += start_time.elapsed(); + false + } + CguReuse::PreLto => { + submit_pre_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + CguReuse::PostLto => { + submit_post_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + }; + } + + ongoing_codegen.codegen_finished(tcx); + + // Since the main thread is sometimes blocked during codegen, we keep track + // -Ztime-passes output manually. + print_time_passes_entry(tcx.sess.time_passes(), + "codegen to LLVM IR", + total_codegen_time); + + ::rustc_incremental::assert_module_sources::assert_module_sources(tcx); + + symbol_names_test::report_symbol_names(tcx); + + if tcx.sess.codegen_stats() { + println!("--- codegen stats ---"); + println!("n_glues_created: {}", all_stats.n_glues_created); + println!("n_null_glues: {}", all_stats.n_null_glues); + println!("n_real_glues: {}", all_stats.n_real_glues); + + println!("n_fns: {}", all_stats.n_fns); + println!("n_inlines: {}", all_stats.n_inlines); + println!("n_closures: {}", all_stats.n_closures); + println!("fn stats:"); + all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); + for &(ref name, insns) in all_stats.fn_stats.iter() { + println!("{} insns, {}", insns, *name); + } + } + + if tcx.sess.count_llvm_insns() { + for (k, v) in all_stats.llvm_insns.iter() { + println!("{:7} {}", *v, *k); + } + } + + ongoing_codegen.check_for_errors(tcx.sess); + + assert_and_save_dep_graph(tcx); + ongoing_codegen.into_inner() +} + +/// A curious wrapper structure whose only purpose is to call `codegen_aborted` +/// when it's dropped abnormally. +/// +/// In the process of working on rust-lang/rust#55238 a mysterious segfault was +/// stumbled upon. The segfault was never reproduced locally, but it was +/// suspected to be related to the fact that codegen worker threads were +/// sticking around by the time the main thread was exiting, causing issues. +/// +/// This structure is an attempt to fix that issue where the `codegen_aborted` +/// message will block until all workers have finished. This should ensure that +/// even if the main codegen thread panics we'll wait for pending work to +/// complete before returning from the main thread, hopefully avoiding +/// segfaults. +/// +/// If you see this comment in the code, then it means that this workaround +/// worked! We may yet one day track down the mysterious cause of that +/// segfault... 
+struct AbortCodegenOnDrop(Option>); + +impl AbortCodegenOnDrop { + fn into_inner(mut self) -> OngoingCodegen { + self.0.take().unwrap() + } +} + +impl Deref for AbortCodegenOnDrop { + type Target = OngoingCodegen; + + fn deref(&self) -> &OngoingCodegen { + self.0.as_ref().unwrap() + } +} + +impl DerefMut for AbortCodegenOnDrop { + fn deref_mut(&mut self) -> &mut OngoingCodegen { + self.0.as_mut().unwrap() + } +} + +impl Drop for AbortCodegenOnDrop { + fn drop(&mut self) { + if let Some(codegen) = self.0.take() { + codegen.codegen_aborted(); + } + } +} + +fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) { + time(tcx.sess, + "assert dep graph", + || ::rustc_incremental::assert_dep_graph(tcx)); + + time(tcx.sess, + "serialize dep graph", + || ::rustc_incremental::save_dep_graph(tcx)); +} + +impl CrateInfo { + pub fn new(tcx: TyCtxt) -> CrateInfo { + let mut info = CrateInfo { + panic_runtime: None, + compiler_builtins: None, + profiler_runtime: None, + sanitizer_runtime: None, + is_no_builtins: Default::default(), + native_libraries: Default::default(), + used_libraries: tcx.native_libraries(LOCAL_CRATE), + link_args: tcx.link_args(LOCAL_CRATE), + crate_name: Default::default(), + used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), + used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), + used_crate_source: Default::default(), + wasm_imports: Default::default(), + lang_item_to_crate: Default::default(), + missing_lang_items: Default::default(), + }; + let lang_items = tcx.lang_items(); + + let load_wasm_items = tcx.sess.crate_types.borrow() + .iter() + .any(|c| *c != config::CrateType::Rlib) && + tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; + + if load_wasm_items { + info.load_wasm_imports(tcx, LOCAL_CRATE); + } + + let crates = tcx.crates(); + + let n_crates = crates.len(); + info.native_libraries.reserve(n_crates); + info.crate_name.reserve(n_crates); + info.used_crate_source.reserve(n_crates); + info.missing_lang_items.reserve(n_crates); + + for &cnum in crates.iter() { + info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); + info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); + info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); + if tcx.is_panic_runtime(cnum) { + info.panic_runtime = Some(cnum); + } + if tcx.is_compiler_builtins(cnum) { + info.compiler_builtins = Some(cnum); + } + if tcx.is_profiler_runtime(cnum) { + info.profiler_runtime = Some(cnum); + } + if tcx.is_sanitizer_runtime(cnum) { + info.sanitizer_runtime = Some(cnum); + } + if tcx.is_no_builtins(cnum) { + info.is_no_builtins.insert(cnum); + } + if load_wasm_items { + info.load_wasm_imports(tcx, cnum); + } + let missing = tcx.missing_lang_items(cnum); + for &item in missing.iter() { + if let Ok(id) = lang_items.require(item) { + info.lang_item_to_crate.insert(item, id.krate); + } + } + + // No need to look for lang items that are whitelisted and don't + // actually need to exist. 
+ let missing = missing.iter() + .cloned() + .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) + .collect(); + info.missing_lang_items.insert(cnum, missing); + } + + return info + } + + fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { + self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| { + let instance = Instance::mono(tcx, id); + let import_name = tcx.symbol_name(instance); + + (import_name.to_string(), module.clone()) + })); + } +} + +fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { + let (all_mono_items, _) = + tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all_mono_items.contains(&id) +} + +pub fn provide_both(providers: &mut Providers) { + providers.dllimport_foreign_items = |tcx, krate| { + let module_map = tcx.foreign_modules(krate); + let module_map = module_map.iter() + .map(|lib| (lib.def_id, lib)) + .collect::>(); + + let dllimports = tcx.native_libraries(krate) + .iter() + .filter(|lib| { + if lib.kind != cstore::NativeLibraryKind::NativeUnknown { + return false + } + let cfg = match lib.cfg { + Some(ref cfg) => cfg, + None => return true, + }; + attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) + }) + .filter_map(|lib| lib.foreign_module) + .map(|id| &module_map[&id]) + .flat_map(|module| module.foreign_items.iter().cloned()) + .collect(); + Lrc::new(dllimports) + }; + + providers.is_dllimport_foreign_item = |tcx, def_id| { + tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) + }; +} + +fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: &CodegenUnit<'tcx>) + -> CguReuse { + if !tcx.dep_graph.is_fully_enabled() { + return CguReuse::No + } + + let work_product_id = &cgu.work_product_id(); + if tcx.dep_graph.previous_work_product(work_product_id).is_none() { + // We don't have anything cached for this CGU. This can happen + // if the CGU did not exist in the previous session. + return CguReuse::No + } + + // Try to mark the CGU as green. If it we can do so, it means that nothing + // affecting the LLVM module has changed and we can re-use a cached version. + // If we compile with any kind of LTO, this means we can re-use the bitcode + // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only + // know that later). If we are not doing LTO, there is only one optimized + // version of each module, so we re-use that. + let dep_node = cgu.codegen_dep_node(tcx); + assert!(!tcx.dep_graph.dep_node_exists(&dep_node), + "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", + cgu.name()); + + if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { + // We can re-use either the pre- or the post-thinlto state + if tcx.sess.lto() != Lto::No { + CguReuse::PreLto + } else { + CguReuse::PostLto + } + } else { + CguReuse::No + } +} diff --git a/src/librustc_codegen_ssa/callee.rs b/src/librustc_codegen_ssa/callee.rs new file mode 100644 index 0000000000000000000000000000000000000000..5ff1d9b59923a789eb77fe20231c414aa34890ef --- /dev/null +++ b/src/librustc_codegen_ssa/callee.rs @@ -0,0 +1,46 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
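The `determine_cgu_reuse` logic above reduces to a small decision tree once the query machinery is stripped away. A minimal sketch of that decision, with hypothetical parameter names standing in for the dep-graph lookups (`has_cached_wp` for the previous work-product check, `marked_green` for `try_mark_green`, `lto` for `sess.lto() != Lto::No`):

    #[derive(Debug, PartialEq)]
    enum Reuse { No, PreLto, PostLto }

    fn decide(incr_enabled: bool, has_cached_wp: bool, marked_green: bool, lto: bool) -> Reuse {
        if !incr_enabled || !has_cached_wp || !marked_green {
            return Reuse::No; // nothing cached, or something changed: recompile the CGU
        }
        // Green CGU: reuse the pre-LTO bitcode if LTO still has to run,
        // otherwise the fully optimized post-LTO artifact can be reused as-is.
        if lto { Reuse::PreLto } else { Reuse::PostLto }
    }

    fn main() {
        assert_eq!(decide(true, true, true, false), Reuse::PostLto);
    }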
+ +use traits::*; +use rustc::ty; +use rustc::ty::subst::Substs; +use rustc::hir::def_id::DefId; + +pub fn resolve_and_get_fn<'tcx, Cx: CodegenMethods<'tcx>>( + cx: &Cx, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Cx::Value { + cx.get_fn( + ty::Instance::resolve( + cx.tcx(), + ty::ParamEnv::reveal_all(), + def_id, + substs + ).unwrap() + ) +} + +pub fn resolve_and_get_fn_for_vtable<'tcx, + Cx: Backend<'tcx> + MiscMethods<'tcx> + TypeMethods<'tcx> +>( + cx: &Cx, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Cx::Value { + cx.get_fn( + ty::Instance::resolve_for_vtable( + cx.tcx(), + ty::ParamEnv::reveal_all(), + def_id, + substs + ).unwrap() + ) +} diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs new file mode 100644 index 0000000000000000000000000000000000000000..6259318a3c97f2aba734bdd56bc7104224aed5a1 --- /dev/null +++ b/src/librustc_codegen_ssa/common.rs @@ -0,0 +1,230 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. +#![allow(non_camel_case_types, non_snake_case)] + +use rustc::ty::{self, Ty, TyCtxt}; +use syntax_pos::{DUMMY_SP, Span}; + +use rustc::hir::def_id::DefId; +use rustc::middle::lang_items::LangItem; +use base; +use traits::*; + +use rustc::hir; +use traits::BuilderMethods; + +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) +} + +pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) +} + +pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) +} + +pub enum IntPredicate { + IntEQ, + IntNE, + IntUGT, + IntUGE, + IntULT, + IntULE, + IntSGT, + IntSGE, + IntSLT, + IntSLE +} + + +#[allow(dead_code)] +pub enum RealPredicate { + RealPredicateFalse, + RealOEQ, + RealOGT, + RealOGE, + RealOLT, + RealOLE, + RealONE, + RealORD, + RealUNO, + RealUEQ, + RealUGT, + RealUGE, + RealULT, + RealULE, + RealUNE, + RealPredicateTrue +} + +pub enum AtomicRmwBinOp { + AtomicXchg, + AtomicAdd, + AtomicSub, + AtomicAnd, + AtomicNand, + AtomicOr, + AtomicXor, + AtomicMax, + AtomicMin, + AtomicUMax, + AtomicUMin +} + +pub enum AtomicOrdering { + #[allow(dead_code)] + NotAtomic, + Unordered, + Monotonic, + // Consume, // Not specified yet. + Acquire, + Release, + AcquireRelease, + SequentiallyConsistent, +} + +pub enum SynchronizationScope { + // FIXME: figure out if this variant is needed at all. + #[allow(dead_code)] + Other, + SingleThread, + CrossThread, +} + +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TypeKind { + Void, + Half, + Float, + Double, + X86_FP80, + FP128, + PPC_FP128, + Label, + Integer, + Function, + Struct, + Array, + Pointer, + Vector, + Metadata, + X86_MMX, + Token, +} + +// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement +// the HashStable trait. Normally DepGraph::with_task() calls are +// hidden behind queries, but CGU creation is a special case in two +// ways: (1) it's not a query and (2) CGU are output nodes, so their +// Fingerprints are not actually needed. 
It remains to be clarified +// how exactly this case will be handled in the red/green system but +// for now we content ourselves with providing a no-op HashStable +// implementation for CGUs. +mod temp_stable_hash_impls { + use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, + HashStable}; + use ModuleCodegen; + + impl HashStable for ModuleCodegen { + fn hash_stable(&self, + _: &mut HCX, + _: &mut StableHasher) { + // do nothing + } + } +} + +pub fn langcall(tcx: TyCtxt, + span: Option, + msg: &str, + li: LangItem) + -> DefId { + tcx.lang_items().require(li).unwrap_or_else(|s| { + let msg = format!("{} {}", msg, s); + match span { + Some(span) => tcx.sess.span_fatal(span, &msg[..]), + None => tcx.sess.fatal(&msg[..]), + } + }) +} + +// To avoid UB from LLVM, these two functions mask RHS with an +// appropriate mask unconditionally (i.e. the fallback behavior for +// all shifts). For 32- and 64-bit types, this matches the semantics +// of Java. (See related discussion on #1877 and #10183.) + +pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + lhs: Bx::Value, + rhs: Bx::Value +) -> Bx::Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + bx.shl(lhs, rhs) +} + +pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + lhs_t: Ty<'tcx>, + lhs: Bx::Value, + rhs: Bx::Value +) -> Bx::Value { + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + let is_signed = lhs_t.is_signed(); + if is_signed { + bx.ashr(lhs, rhs) + } else { + bx.lshr(lhs, rhs) + } +} + +fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + rhs: Bx::Value +) -> Bx::Value { + let rhs_llty = bx.cx().val_ty(rhs); + let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false); + bx.and(rhs, shift_val) +} + +pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + llty: Bx::Type, + mask_llty: Bx::Type, + invert: bool +) -> Bx::Value { + let kind = bx.cx().type_kind(llty); + match kind { + TypeKind::Integer => { + // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. + let val = bx.cx().int_width(llty) - 1; + if invert { + bx.cx().const_int(mask_llty, !val as i64) + } else { + bx.cx().const_uint(mask_llty, val) + } + }, + TypeKind::Vector => { + let mask = shift_mask_val( + bx, + bx.cx().element_type(llty), + bx.cx().element_type(mask_llty), + invert + ); + bx.vector_splat(bx.cx().vector_length(mask_llty), mask) + }, + _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), + } +} diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs new file mode 100644 index 0000000000000000000000000000000000000000..0fc61422bb3a20beba657623d8df9d2ade68d64a --- /dev/null +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -0,0 +1,92 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
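For the `TypeKind::Integer` arm of `shift_mask_val` above, the mask is simply `bit width - 1`, the same trick `wrapping_shl`/`wrapping_shr` use in plain Rust. A standalone sketch (ordinary Rust, not backend code) of the effect on a `u32`:

    // Masking the shift amount keeps it in range and avoids the undefined
    // behaviour LLVM assigns to oversized shifts; for u32 the mask is 31.
    fn masked_shl(lhs: u32, rhs: u32) -> u32 {
        lhs << (rhs & 31)
    }

    fn main() {
        assert_eq!(masked_shl(1, 33), 2); // 33 & 31 == 1, so this is 1 << 1
    }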
+
+use syntax_pos::{BytePos, Span};
+use rustc::hir::def_id::CrateNum;
+use std::cell::Cell;
+
+pub enum FunctionDebugContext<D> {
+    RegularContext(FunctionDebugContextData<D>),
+    DebugInfoDisabled,
+    FunctionWithoutDebugInfo,
+}
+
+impl<D> FunctionDebugContext<D> {
+    pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<D> {
+        match *self {
+            FunctionDebugContext::RegularContext(ref data) => data,
+            FunctionDebugContext::DebugInfoDisabled => {
+                span_bug!(span, "{}", FunctionDebugContext::<D>::debuginfo_disabled_message());
+            }
+            FunctionDebugContext::FunctionWithoutDebugInfo => {
+                span_bug!(span, "{}", FunctionDebugContext::<D>::should_be_ignored_message());
+            }
+        }
+    }
+
+    fn debuginfo_disabled_message() -> &'static str {
+        "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
+    }
+
+    fn should_be_ignored_message() -> &'static str {
+        "debuginfo: Error trying to access FunctionDebugContext for function that should be \
+         ignored by debug info!"
+    }
+}
+
+/// Enables emitting source locations for the given functions.
+///
+/// Since we don't want source locations to be emitted for the function prelude,
+/// they are disabled when beginning to codegen a new function. This function
+/// switches source location emitting on and must therefore be called before the
+/// first real statement/expression of the function is codegened.
+pub fn start_emitting_source_locations<D>(dbg_context: &FunctionDebugContext<D>) {
+    match *dbg_context {
+        FunctionDebugContext::RegularContext(ref data) => {
+            data.source_locations_enabled.set(true)
+        },
+        _ => { /* safe to ignore */ }
+    }
+}
+
+pub struct FunctionDebugContextData<D> {
+    pub fn_metadata: D,
+    pub source_locations_enabled: Cell<bool>,
+    pub defining_crate: CrateNum,
+}
+
+pub enum VariableAccess<'a, V> {
+    // The llptr given is an alloca containing the variable's value
+    DirectVariable { alloca: V },
+    // The llptr given is an alloca containing the start of some pointer chain
+    // leading to the variable's content.
+    IndirectVariable { alloca: V, address_operations: &'a [i64] }
+}
+
+pub enum VariableKind {
+    ArgumentVariable(usize /*index*/),
+    LocalVariable,
+}
+
+
+#[derive(Clone, Copy, Debug)]
+pub struct MirDebugScope<D> {
+    pub scope_metadata: Option<D>,
+    // Start and end offsets of the file to which this DIScope belongs.
+    // These are used to quickly determine whether some span refers to the same file.
+    pub file_start_pos: BytePos,
+    pub file_end_pos: BytePos,
+}
+
+impl<D> MirDebugScope<D> {
+    pub fn is_valid(&self) -> bool {
+        !self.scope_metadata.is_none()
+    }
+}
diff --git a/src/librustc_codegen_ssa/diagnostics.rs b/src/librustc_codegen_ssa/diagnostics.rs
new file mode 100644
index 0000000000000000000000000000000000000000..abe19068889c1008153b090b2a982e1af6d74943
--- /dev/null
+++ b/src/librustc_codegen_ssa/diagnostics.rs
@@ -0,0 +1,48 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(non_snake_case)]
+
+register_long_diagnostics! {
+
+E0668: r##"
+Malformed inline assembly rejected by LLVM.
+
+LLVM checks the validity of the constraints and the assembly string passed to
+it. This error implies that LLVM sees something wrong with the inline
+assembly call.
+ +In particular, it can happen if you forgot the closing bracket of a register +constraint (see issue #51430): +```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) +#![feature(asm)] + +fn main() { + let rax: u64; + unsafe { + asm!("" :"={rax"(rax)); + println!("Accumulator is: {}", rax); + } +} +``` +"##, + +E0669: r##" +Cannot convert inline assembly operand to a single LLVM value. + +This error usually happens when trying to pass in a value to an input inline +assembly operand that is actually a pair of values. In particular, this can +happen when trying to pass in a slice, for instance a `&str`. In Rust, these +values are represented internally as a pair of values, the pointer and its +length. When passed as an input operand, this pair of values can not be +coerced into a register and thus we must fail with an error. +"##, + +} diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_ssa/glue.rs similarity index 71% rename from src/librustc_codegen_llvm/glue.rs rename to src/librustc_codegen_ssa/glue.rs index 842bdf3cb493ff1b01772c5d4026c8388ae440e3..515f36b5c65deb6e35d0757afeb9fbb76aeceb43 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -14,24 +14,25 @@ use std; -use builder::Builder; -use common::*; -use llvm; +use common::IntPredicate; use meth; use rustc::ty::layout::LayoutOf; use rustc::ty::{self, Ty}; -use value::Value; +use traits::*; -pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Option<&'ll Value>) - -> (&'ll Value, &'ll Value) { +pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + t: Ty<'tcx>, + info: Option +) -> (Bx::Value, Bx::Value) { debug!("calculate size of DST: {}; with lost info: {:?}", t, info); - if bx.cx.type_is_sized(t) { - let (size, align) = bx.cx.size_and_align_of(t); + if bx.cx().type_is_sized(t) { + let (size, align) = bx.cx().layout_of(t).size_and_align(); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, info, size, align); - let size = C_usize(bx.cx, size.bytes()); - let align = C_usize(bx.cx, align.abi()); + let size = bx.cx().const_usize(size.bytes()); + let align = bx.cx().const_usize(align.abi()); return (size, align); } match t.sty { @@ -44,17 +45,16 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt let unit = t.sequence_element_type(bx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. - let (size, align) = bx.cx.size_and_align_of(unit); - (bx.mul(info.unwrap(), C_usize(bx.cx, size.bytes())), - C_usize(bx.cx, align.abi())) + let (size, align) = bx.cx().layout_of(unit).size_and_align(); + (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())), + bx.cx().const_usize(align.abi())) } _ => { - let cx = bx.cx; // First get the size of all statically known fields. // Don't use size_of because it also rounds up to alignment, which we // want to avoid, as the unsized field's alignment could be smaller. 
assert!(!t.is_simd()); - let layout = cx.layout_of(t); + let layout = bx.cx().layout_of(t); debug!("DST {} layout: {:?}", t, layout); let i = layout.fields.count() - 1; @@ -62,12 +62,12 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt let sized_align = layout.align.abi(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); - let sized_size = C_usize(cx, sized_size); - let sized_align = C_usize(cx, sized_align); + let sized_size = bx.cx().const_usize(sized_size); + let sized_align = bx.cx().const_usize(sized_align); // Recurse to get the size of the dynamically sized field (must be // the last field). - let field_ty = layout.field(cx, i).ty; + let field_ty = layout.field(bx.cx(), i).ty; let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info); // FIXME (#26403, #27023): We should be adding padding @@ -89,16 +89,17 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). - let align = match (const_to_opt_u128(sized_align, false), - const_to_opt_u128(unsized_align, false)) { + let align = match (bx.cx().const_to_opt_u128(sized_align, false), + bx.cx().const_to_opt_u128(unsized_align, false)) { (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. - C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64) + bx.cx().const_usize(std::cmp::max(sized_align, unsized_align) as u64) + } + _ => { + let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align); + bx.select(cmp, sized_align, unsized_align) } - _ => bx.select(bx.icmp(llvm::IntUGT, sized_align, unsized_align), - sized_align, - unsized_align) }; // Issue #27023: must add any necessary padding to `size` @@ -111,9 +112,11 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt // emulated via the semi-standard fast bit trick: // // `(size + (align-1)) & -align` - - let addend = bx.sub(align, C_usize(bx.cx, 1)); - let size = bx.and(bx.add(size, addend), bx.neg(align)); + let one = bx.cx().const_usize(1); + let addend = bx.sub(align, one); + let add = bx.add(size, addend); + let neg = bx.neg(align); + let size = bx.and(add, neg); (size, align) } diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..24ede4db6e3df0bc7402cb219b86fac138644894 --- /dev/null +++ b/src/librustc_codegen_ssa/lib.rs @@ -0,0 +1,187 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![feature(box_patterns)] +#![feature(box_syntax)] +#![feature(custom_attribute)] +#![feature(libc)] +#![feature(rustc_diagnostic_macros)] +#![feature(in_band_lifetimes)] +#![feature(slice_sort_by_cached_key)] +#![feature(nll)] +#![allow(unused_attributes)] +#![allow(dead_code)] +#![feature(quote)] + +//! 
This crate contains codegen code that is used by all codegen backends (LLVM and others).
+//! The backend-agnostic functions of this crate use functions defined in various traits that
+//! have to be implemented by each backend.
+
+#[macro_use] extern crate bitflags;
+#[macro_use] extern crate log;
+extern crate rustc_apfloat;
+#[macro_use] extern crate rustc;
+extern crate rustc_target;
+extern crate rustc_mir;
+#[macro_use] extern crate syntax;
+extern crate syntax_pos;
+extern crate rustc_incremental;
+extern crate rustc_codegen_utils;
+extern crate rustc_data_structures;
+extern crate rustc_allocator;
+extern crate rustc_fs_util;
+extern crate serialize;
+extern crate rustc_errors;
+extern crate rustc_demangle;
+extern crate cc;
+extern crate libc;
+extern crate jobserver;
+extern crate memmap;
+extern crate num_cpus;
+
+use std::path::PathBuf;
+use rustc::dep_graph::WorkProduct;
+use rustc::session::config::{OutputFilenames, OutputType};
+use rustc::middle::lang_items::LangItem;
+use rustc::hir::def_id::CrateNum;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::svh::Svh;
+use rustc::middle::cstore::{LibSource, CrateSource, NativeLibrary};
+use syntax_pos::symbol::Symbol;
+
+// NB: This module needs to be declared first so diagnostics are
+// registered before they are used.
+mod diagnostics;
+
+pub mod common;
+pub mod traits;
+pub mod mir;
+pub mod debuginfo;
+pub mod base;
+pub mod callee;
+pub mod glue;
+pub mod meth;
+pub mod mono_item;
+pub mod back;
+
+pub struct ModuleCodegen<M> {
+    /// The name of the module. When the crate may be saved between
+    /// compilations, incremental compilation requires that name be
+    /// unique amongst **all** crates. Therefore, it should contain
+    /// something unique to this crate (e.g., a module path) as well
+    /// as the crate name and disambiguator.
+    /// We currently generate these names via CodegenUnit::build_cgu_name().
+    pub name: String,
+    pub module_llvm: M,
+    pub kind: ModuleKind,
+}
+
+pub const RLIB_BYTECODE_EXTENSION: &str = "bc.z";
+
+impl<M> ModuleCodegen<M> {
+    pub fn into_compiled_module(self,
+                                emit_obj: bool,
+                                emit_bc: bool,
+                                emit_bc_compressed: bool,
+                                outputs: &OutputFilenames) -> CompiledModule {
+        let object = if emit_obj {
+            Some(outputs.temp_path(OutputType::Object, Some(&self.name)))
+        } else {
+            None
+        };
+        let bytecode = if emit_bc {
+            Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)))
+        } else {
+            None
+        };
+        let bytecode_compressed = if emit_bc_compressed {
+            Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))
+                    .with_extension(RLIB_BYTECODE_EXTENSION))
+        } else {
+            None
+        };
+
+        CompiledModule {
+            name: self.name.clone(),
+            kind: self.kind,
+            object,
+            bytecode,
+            bytecode_compressed,
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct CompiledModule {
+    pub name: String,
+    pub kind: ModuleKind,
+    pub object: Option<PathBuf>,
+    pub bytecode: Option<PathBuf>,
+    pub bytecode_compressed: Option<PathBuf>,
+}
+
+pub struct CachedModuleCodegen {
+    pub name: String,
+    pub source: WorkProduct,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum ModuleKind {
+    Regular,
+    Metadata,
+    Allocator,
+}
+
+bitflags!
{ + pub struct MemFlags: u8 { + const VOLATILE = 1 << 0; + const NONTEMPORAL = 1 << 1; + const UNALIGNED = 1 << 2; + } +} + +/// Misc info we load from metadata to persist beyond the tcx +pub struct CrateInfo { + pub panic_runtime: Option, + pub compiler_builtins: Option, + pub profiler_runtime: Option, + pub sanitizer_runtime: Option, + pub is_no_builtins: FxHashSet, + pub native_libraries: FxHashMap>>, + pub crate_name: FxHashMap, + pub used_libraries: Lrc>, + pub link_args: Lrc>, + pub used_crate_source: FxHashMap>, + pub used_crates_static: Vec<(CrateNum, LibSource)>, + pub used_crates_dynamic: Vec<(CrateNum, LibSource)>, + pub wasm_imports: FxHashMap, + pub lang_item_to_crate: FxHashMap, + pub missing_lang_items: FxHashMap>, +} + + +pub struct CodegenResults { + pub crate_name: Symbol, + pub modules: Vec, + pub allocator_module: Option, + pub metadata_module: CompiledModule, + pub crate_hash: Svh, + pub metadata: rustc::middle::cstore::EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: back::linker::LinkerInfo, + pub crate_info: CrateInfo, +} + +__build_diagnostic_array! { librustc_codegen_ssa, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_ssa/meth.rs similarity index 63% rename from src/librustc_codegen_llvm/meth.rs rename to src/librustc_codegen_ssa/meth.rs index 0dc5a4ddde82c538bc820518cff99dfb18b3f5cd..06c4f7a87d88017de8337a6c123700da70ea0950 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -8,18 +8,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{FnType, FnTypeExt}; +use rustc_target::abi::call::FnType; use callee; -use common::*; -use builder::Builder; -use consts; -use monomorphize; -use type_::Type; -use value::Value; +use rustc_mir::monomorphize; + +use traits::*; use rustc::ty::{self, Ty}; -use rustc::ty::layout::HasDataLayout; -use debuginfo; #[derive(Copy, Clone, Debug)] pub struct VirtualIndex(u64); @@ -28,33 +23,45 @@ pub const SIZE: VirtualIndex = VirtualIndex(1); pub const ALIGN: VirtualIndex = VirtualIndex(2); -impl<'a, 'tcx> VirtualIndex { +impl<'a, 'tcx: 'a> VirtualIndex { pub fn from_index(index: usize) -> Self { VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>, - llvtable: &'ll Value, - fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value { + pub fn get_fn>( + self, + bx: &mut Bx, + llvtable: Bx::Value, + fn_ty: &FnType<'tcx, Ty<'tcx>> + ) -> Bx::Value { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, fn_ty.ptr_to_llvm_type(bx.cx).ptr_to()); + let llvtable = bx.pointercast( + llvtable, + bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty)) + ); let ptr_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), ptr_align); + let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); + let ptr = bx.load(gep, ptr_align); bx.nonnull_metadata(ptr); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr } - pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: &'ll Value) -> &'ll Value { + pub fn get_usize>( + self, + bx: &mut Bx, + llvtable: Bx::Value + ) -> Bx::Value { // Load the data pointer from the object. 
debug!("get_int({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to()); + let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize())); let usize_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), usize_align); + let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); + let ptr = bx.load(gep, usize_align); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr @@ -69,22 +76,22 @@ pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: &'ll Value) -> &'l /// The `trait_ref` encodes the erased self type. Hence if we are /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. -pub fn get_vtable( - cx: &CodegenCx<'ll, 'tcx>, +pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>( + cx: &Cx, ty: Ty<'tcx>, trait_ref: ty::PolyExistentialTraitRef<'tcx>, -) -> &'ll Value { - let tcx = cx.tcx; +) -> Cx::Value { + let tcx = cx.tcx(); debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); // Check the cache. - if let Some(&val) = cx.vtables.borrow().get(&(ty, trait_ref)) { + if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) { return val; } // Not in the cache. Build it. - let nullptr = C_null(Type::i8p(cx)); + let nullptr = cx.const_null(cx.type_i8p()); let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { @@ -93,23 +100,23 @@ pub fn get_vtable( }) }); - let (size, align) = cx.size_and_align_of(ty); + let (size, align) = cx.layout_of(ty).size_and_align(); // ///////////////////////////////////////////////////////////////////////////////////////////// // If you touch this code, be sure to also make the corresponding changes to // `get_vtable` in rust_mir/interpret/traits.rs // ///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ - callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)), - C_usize(cx, size.bytes()), - C_usize(cx, align.abi()) + cx.get_fn(monomorphize::resolve_drop_in_place(cx.tcx(), ty)), + cx.const_usize(size.bytes()), + cx.const_usize(align.abi()) ].iter().cloned().chain(methods).collect(); - let vtable_const = C_struct(cx, &components, false); + let vtable_const = cx.const_struct(&components, false); let align = cx.data_layout().pointer_align; - let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); + let vtable = cx.static_addr_of(vtable_const, align, Some("vtable")); - debuginfo::create_vtable_metadata(cx, ty, vtable); + cx.create_vtable_metadata(ty, vtable); - cx.vtables.borrow_mut().insert((ty, trait_ref), vtable); + cx.vtables().borrow_mut().insert((ty, trait_ref), vtable); vtable } diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_ssa/mir/analyze.rs similarity index 93% rename from src/librustc_codegen_llvm/mir/analyze.rs rename to src/librustc_codegen_ssa/mir/analyze.rs index 2af772bd7ce22abaf9f3229c97c56c674324588d..c7e2c76c3e503b82ddd288edad10241adfdaed50 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_ssa/mir/analyze.rs @@ -18,11 +18,13 @@ use rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext}; use rustc::mir::traversal; use rustc::ty; -use rustc::ty::layout::LayoutOf; -use type_of::LayoutLlvmExt; +use rustc::ty::layout::{LayoutOf, HasTyCtxt}; use super::FunctionCx; +use traits::*; -pub fn 
non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { +pub fn non_ssa_locals<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + fx: &FunctionCx<'a, 'tcx, Bx> +) -> BitSet { let mir = fx.mir; let mut analyzer = LocalAnalyzer::new(fx); @@ -32,10 +34,10 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { let ty = fx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); let layout = fx.cx.layout_of(ty); - if layout.is_llvm_immediate() { + if fx.cx.is_backend_immediate(layout) { // These sorts of types are immediates that we can store // in an Value without an alloca. - } else if layout.is_llvm_scalar_pair() { + } else if fx.cx.is_backend_scalar_pair(layout) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -51,8 +53,8 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { analyzer.non_ssa_locals } -struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { - fx: &'mir FunctionCx<'a, 'll, 'tcx>, +struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { + fx: &'mir FunctionCx<'a, 'tcx, Bx>, dominators: Dominators, non_ssa_locals: BitSet, // The location of the first visited direct assignment to each @@ -60,8 +62,8 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { first_assignment: IndexVec } -impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { - fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx>) -> Self { +impl> LocalAnalyzer<'mir, 'a, 'tcx, Bx> { + fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self { let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location(); let mut analyzer = LocalAnalyzer { @@ -102,7 +104,8 @@ fn assign(&mut self, local: mir::Local, location: Location) { } } -impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { +impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> + for LocalAnalyzer<'mir, 'a, 'tcx, Bx> { fn visit_assign(&mut self, block: mir::BasicBlock, place: &mir::Place<'tcx>, @@ -141,7 +144,7 @@ fn visit_terminator_kind(&mut self, _ => None, }; if let Some((def_id, args)) = check { - if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() { + if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. @@ -173,21 +176,21 @@ fn visit_place(&mut self, _ => false }; if is_consume { - let base_ty = proj.base.ty(self.fx.mir, cx.tcx); + let base_ty = proj.base.ty(self.fx.mir, cx.tcx()); let base_ty = self.fx.monomorphize(&base_ty); // ZSTs don't require any actual memory access. let elem_ty = base_ty - .projection_ty(cx.tcx, &proj.elem) - .to_ty(cx.tcx); + .projection_ty(cx.tcx(), &proj.elem) + .to_ty(cx.tcx()); let elem_ty = self.fx.monomorphize(&elem_ty); if cx.layout_of(elem_ty).is_zst() { return; } if let mir::ProjectionElem::Field(..) = proj.elem { - let layout = cx.layout_of(base_ty.to_ty(cx.tcx)); - if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { + let layout = cx.layout_of(base_ty.to_ty(cx.tcx())); + if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) { // Recurse with the same context, instead of `Projection`, // potentially stopping at non-operand projections, // which would trigger `not_ssa` on locals. 
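Roughly speaking, the analysis in this file keeps a local as a pure SSA value only if its layout is an immediate or scalar pair and its address is never taken; everything else is marked non-SSA and gets an alloca. An illustrative (non-compiler) Rust snippet of that distinction, under those simplifying assumptions:

    fn example(a: i32, b: i32) -> i32 {
        let x = a + b;        // scalar immediate, never borrowed: can stay an SSA value
        let y = [a, b, 7, 9]; // aggregate local...
        let p = &y;           // ...whose address is taken: needs an alloca
        x + p[0]
    }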
@@ -252,8 +255,8 @@ fn visit_local(&mut self, } PlaceContext::MutatingUse(MutatingUseContext::Drop) => { - let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx); - let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx)); + let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx()); + let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx())); // Only need the place if we're actually dropping it. if self.fx.cx.type_needs_drop(ty) { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs similarity index 73% rename from src/librustc_codegen_llvm/mir/block.rs rename to src/librustc_codegen_ssa/mir/block.rs index 3f9921a5cf930b68372a3584a17f6b725727d6f4..1702ad19b76fd04e22c8f9e98f778f21c75953f8 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -8,23 +8,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, BasicBlock}; use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; -use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode}; +use rustc_target::abi::call::{ArgType, FnType, PassMode}; +use rustc_target::spec::abi::Abi; use base; -use callee; -use builder::{Builder, MemFlags}; -use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef}; -use consts; +use MemFlags; +use common::{self, IntPredicate}; use meth; -use monomorphize; -use type_of::LayoutLlvmExt; -use type_::Type; -use value::Value; +use rustc_mir::monomorphize; + +use traits::*; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -34,8 +31,11 @@ use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_block(&mut self, bb: mir::BasicBlock) { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_block( + &mut self, + bb: mir::BasicBlock, + ) { let mut bx = self.build_block(bb); let data = &self.mir[bb]; @@ -48,21 +48,33 @@ pub fn codegen_block(&mut self, bb: mir::BasicBlock) { self.codegen_terminator(bx, bb, data.terminator()); } - fn codegen_terminator(&mut self, - mut bx: Builder<'a, 'll, 'tcx>, - bb: mir::BasicBlock, - terminator: &mir::Terminator<'tcx>) - { + fn codegen_terminator( + &mut self, + mut bx: Bx, + bb: mir::BasicBlock, + terminator: &mir::Terminator<'tcx> + ) { debug!("codegen_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. - let tcx = bx.tcx(); + let tcx = self.cx.tcx(); let span = terminator.source_info.span; let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); - let funclet = funclet_bb.and_then(|funclet_bb| self.funclets[funclet_bb].as_ref()); - let cleanup_pad = funclet.map(|lp| lp.cleanuppad()); - let cleanup_bundle = funclet.map(|l| l.bundle()); + // HACK(eddyb) force the right lifetimes, NLL can't figure them out. 
+ fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + funclet_bb: Option + ) -> impl for<'b> Fn( + &'b FunctionCx<'a, 'tcx, Bx>, + ) -> Option<&'b Bx::Funclet> { + move |this| { + match funclet_bb { + Some(funclet_bb) => this.funclets[funclet_bb].as_ref(), + None => None, + } + } + } + let funclet = funclet_closure_factory(funclet_bb); let lltarget = |this: &mut Self, target: mir::BasicBlock| { let lltarget = this.blocks[target]; @@ -90,32 +102,33 @@ fn codegen_terminator(&mut self, debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.new_block(name); - trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + let mut trampoline = this.new_block(name); + trampoline.cleanup_ret(funclet(this).unwrap(), Some(lltarget)); trampoline.llbb() } else { lltarget } }; - let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_>, target: mir::BasicBlock| { - let (lltarget, is_cleanupret) = lltarget(this, target); - if is_cleanupret { - // micro-optimization: generate a `ret` rather than a jump - // to a trampoline. - bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); - } else { - bx.br(lltarget); - } - }; + let funclet_br = + |this: &mut Self, bx: &mut Bx, target: mir::BasicBlock| { + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. + bx.cleanup_ret(funclet(this).unwrap(), Some(lltarget)); + } else { + bx.br(lltarget); + } + }; let do_call = | this: &mut Self, - bx: Builder<'a, 'll, 'tcx>, + bx: &mut Bx, fn_ty: FnType<'tcx, Ty<'tcx>>, - fn_ptr: &'ll Value, - llargs: &[&'ll Value], - destination: Option<(ReturnDest<'ll, 'tcx>, mir::BasicBlock)>, + fn_ptr: Bx::Value, + llargs: &[Bx::Value], + destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { @@ -128,27 +141,27 @@ fn codegen_terminator(&mut self, &llargs, ret_bx, llblock(this, cleanup), - cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, invokeret); + funclet(this)); + bx.apply_attrs_callsite(&fn_ty, invokeret); if let Some((ret_dest, target)) = destination { - let ret_bx = this.build_block(target); - this.set_debug_loc(&ret_bx, terminator.source_info); - this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret); + let mut ret_bx = this.build_block(target); + this.set_debug_loc(&mut ret_bx, terminator.source_info); + this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret); } } else { - let llret = bx.call(fn_ptr, &llargs, cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, llret); + let llret = bx.call(fn_ptr, &llargs, funclet(this)); + bx.apply_attrs_callsite(&fn_ty, llret); if this.mir[bb].is_cleanup { // Cleanup is always the cold path. Don't inline // drop glue. Also, when there is a deeply-nested // struct, there are "symmetry" issues that cause // exponential inlining - see issue #41696. 
- llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + bx.do_not_inline(llret); } if let Some((ret_dest, target)) = destination { - this.store_return(&bx, ret_dest, &fn_ty.ret, llret); + this.store_return(bx, ret_dest, &fn_ty.ret, llret); funclet_br(this, bx, target); } else { bx.unreachable(); @@ -156,24 +169,26 @@ fn codegen_terminator(&mut self, } }; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { - if let Some(cleanup_pad) = cleanup_pad { - bx.cleanup_ret(cleanup_pad, None); + if let Some(funclet) = funclet(self) { + bx.cleanup_ret(funclet, None); } else { - let slot = self.get_personality_slot(&bx); - let lp0 = slot.project_field(&bx, 0).load(&bx).immediate(); - let lp1 = slot.project_field(&bx, 1).load(&bx).immediate(); - slot.storage_dead(&bx); - - if !bx.sess().target.target.options.custom_unwind_resume { - let mut lp = C_undef(self.landing_pad_type()); + let slot = self.get_personality_slot(&mut bx); + let lp0 = slot.project_field(&mut bx, 0); + let lp0 = bx.load_operand(lp0).immediate(); + let lp1 = slot.project_field(&mut bx, 1); + let lp1 = bx.load_operand(lp1).immediate(); + slot.storage_dead(&mut bx); + + if !bx.cx().sess().target.target.options.custom_unwind_resume { + let mut lp = bx.cx().const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); } else { - bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle); + bx.call(bx.cx().eh_unwind_resume(), &[lp0], funclet(self)); bx.unreachable(); } } @@ -181,17 +196,17 @@ fn codegen_terminator(&mut self, mir::TerminatorKind::Abort => { // Call core::intrinsics::abort() - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); bx.unreachable(); } mir::TerminatorKind::Goto { target } => { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { - let discr = self.codegen_operand(&bx, discr); + let discr = self.codegen_operand(&mut bx, discr); if targets.len() == 2 { // If there are two targets, emit br instead of switch let lltrue = llblock(self, targets[0]); @@ -205,9 +220,11 @@ fn codegen_terminator(&mut self, bx.cond_br(discr.immediate(), lltrue, llfalse); } } else { - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); - let llval = C_uint_big(switch_llty, values[0]); - let cmp = bx.icmp(llvm::IntEQ, discr.immediate(), llval); + let switch_llty = bx.cx().immediate_backend_type( + bx.cx().layout_of(switch_ty) + ); + let llval = bx.cx().const_uint_big(switch_llty, values[0]); + let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } } else { @@ -215,9 +232,11 @@ fn codegen_terminator(&mut self, let switch = bx.switch(discr.immediate(), llblock(self, *otherwise), values.len()); - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); + let switch_llty = bx.cx().immediate_backend_type( + bx.cx().layout_of(switch_ty) + ); for (&value, target) in values.iter().zip(targets) { - let llval = C_uint_big(switch_llty, value); + let llval = bx.cx().const_uint_big(switch_llty, value); let llbb = llblock(self, *target); bx.add_case(switch, llval, llbb) } @@ -232,11 +251,12 @@ fn codegen_terminator(&mut self, } PassMode::Direct(_) | PassMode::Pair(..) 
=> { - let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE)); + let op = + self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE)); if let Ref(llval, _, align) = op.val { bx.load(llval, align) } else { - op.immediate_or_packed_pair(&bx) + op.immediate_or_packed_pair(&mut bx) } } @@ -254,8 +274,9 @@ fn codegen_terminator(&mut self, }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret"); - op.val.store(&bx, scratch); + let scratch = + PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + op.val.store(&mut bx, scratch); scratch.llval } Ref(llval, _, align) => { @@ -264,9 +285,10 @@ fn codegen_terminator(&mut self, llval } }; - bx.load( - bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()), - self.fn_ty.ret.layout.align) + let addr = bx.pointercast(llslot, bx.cx().type_ptr_to( + bx.cx().cast_backend_type(&cast_ty) + )); + bx.load(addr, self.fn_ty.ret.layout.align) } }; bx.ret(llval); @@ -279,15 +301,15 @@ fn codegen_terminator(&mut self, mir::TerminatorKind::Drop { ref location, target, unwind } => { let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty); + let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return } - let place = self.codegen_place(&bx, location); + let place = self.codegen_place(&mut bx, location); let (args1, args2); let mut args = if let Some(llextra) = place.llextra { args2 = [place.llval, llextra]; @@ -298,29 +320,29 @@ fn codegen_terminator(&mut self, }; let (drop_fn, fn_ty) = match ty.sty { ty::Dynamic(..) => { - let sig = drop_fn.fn_sig(bx.cx.tcx); - let sig = bx.tcx().normalize_erasing_late_bound_regions( + let sig = drop_fn.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - let fn_ty = FnType::new_vtable(bx.cx, sig, &[]); + let fn_ty = bx.cx().new_vtable(sig, &[]); let vtable = args[1]; args = &args[..1]; - (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty) + (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty) } _ => { - (callee::get_fn(bx.cx, drop_fn), - FnType::of_instance(bx.cx, &drop_fn)) + (bx.cx().get_fn(drop_fn), + bx.cx().fn_type_of_instance(&drop_fn)) } }; - do_call(self, bx, fn_ty, drop_fn, args, + do_call(self, &mut bx, fn_ty, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind); } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - let cond = self.codegen_operand(&bx, cond).immediate(); - let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1); + let cond = self.codegen_operand(&mut bx, cond).immediate(); + let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1); // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from @@ -329,7 +351,7 @@ fn codegen_terminator(&mut self, // NOTE: Unlike binops, negation doesn't have its own // checked operation, just a comparison with the minimum // value, so we have to check for the assert message. 
- if !bx.cx.check_overflow { + if !bx.cx().check_overflow() { if let mir::interpret::EvalErrorKind::OverflowNeg = *msg { const_cond = Some(expected); } @@ -337,13 +359,13 @@ fn codegen_terminator(&mut self, // Don't codegen the panic block if success if known. if const_cond == Some(expected) { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return; } // Pass the condition through llvm.expect for branch hinting. - let expect = bx.cx.get_intrinsic(&"llvm.expect.i1"); - let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None); + let expect = bx.cx().get_intrinsic(&"llvm.expect.i1"); + let cond = bx.call(expect, &[cond, bx.cx().const_bool(expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); @@ -356,14 +378,14 @@ fn codegen_terminator(&mut self, // After this point, bx is the block for the call to panic. bx = panic_block; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); // Get the location information. - let loc = bx.sess().source_map().lookup_char_pos(span.lo()); + let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let filename = bx.cx().const_str_slice(filename); + let line = bx.cx().const_u32(loc.line as u32); + let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -374,25 +396,28 @@ fn codegen_terminator(&mut self, let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = C_struct(bx.cx, &[filename, line, col], false); - let file_line_col = consts::addr_of(bx.cx, - file_line_col, - align, - Some("panic_bounds_check_loc")); + let file_line_col = bx.cx().const_struct(&[filename, line, col], false); + let file_line_col = bx.cx().static_addr_of( + file_line_col, + align, + Some("panic_bounds_check_loc") + ); (lang_items::PanicBoundsCheckFnLangItem, vec![file_line_col, index, len]) } _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); - let msg_file_line_col = consts::addr_of(bx.cx, - msg_file_line_col, - align, - Some("panic_loc")); + let msg_str = bx.cx().const_str_slice(msg_str); + let msg_file_line_col = bx.cx().const_struct( + &[msg_str, filename, line, col], + false + ); + let msg_file_line_col = bx.cx().static_addr_of( + msg_file_line_col, + align, + Some("panic_loc") + ); (lang_items::PanicFnLangItem, vec![msg_file_line_col]) } @@ -401,11 +426,11 @@ fn codegen_terminator(&mut self, // Obtain the panic entry point. let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = bx.cx().fn_type_of_instance(&instance); + let llfn = bx.cx().get_fn(instance); // Codegen the actual panic invoke/call. - do_call(self, bx, fn_ty, llfn, &args, None, cleanup); + do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup); } mir::TerminatorKind::DropAndReplace { .. 
} => { @@ -420,11 +445,11 @@ fn codegen_terminator(&mut self, from_hir_call: _ } => { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. - let callee = self.codegen_operand(&bx, func); + let callee = self.codegen_operand(&mut bx, func); let (instance, mut llfn) = match callee.layout.ty.sty { ty::FnDef(def_id, substs) => { - (Some(ty::Instance::resolve(bx.cx.tcx, + (Some(ty::Instance::resolve(bx.cx().tcx(), ty::ParamEnv::reveal_all(), def_id, substs).unwrap()), @@ -454,8 +479,8 @@ fn codegen_terminator(&mut self, if intrinsic == Some("transmute") { if let Some(destination_ref) = destination.as_ref() { let &(ref dest, target) = destination_ref; - self.codegen_transmute(&bx, &args[0], dest); - funclet_br(self, bx, target); + self.codegen_transmute(&mut bx, &args[0], dest); + funclet_br(self, &mut bx, target); } else { // If we are trying to transmute to an uninhabited type, // it is likely there is no allotted destination. In fact, @@ -463,7 +488,7 @@ fn codegen_terminator(&mut self, // we can do what we like. Here, we declare that transmuting // into an uninhabited type is impossible, so anything following // it must be unreachable. - assert_eq!(bx.cx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); + assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited); bx.unreachable(); } return; @@ -477,26 +502,26 @@ fn codegen_terminator(&mut self, let fn_ty = match def { Some(ty::InstanceDef::Virtual(..)) => { - FnType::new_vtable(bx.cx, sig, &extra_args) + bx.cx().new_vtable(sig, &extra_args) } Some(ty::InstanceDef::DropGlue(_, None)) => { // empty drop glue - a nop. let &(_, target) = destination.as_ref().unwrap(); - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return; } - _ => FnType::new(bx.cx, sig, &extra_args) + _ => bx.cx().new_fn_type(sig, &extra_args) }; // emit a panic instead of instantiating an uninhabited type if (intrinsic == Some("init") || intrinsic == Some("uninit")) && fn_ty.ret.layout.abi.is_uninhabited() { - let loc = bx.sess().source_map().lookup_char_pos(span.lo()); + let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let filename = bx.cx().const_str_slice(filename); + let line = bx.cx().const_u32(loc.line as u32); + let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -507,26 +532,28 @@ fn codegen_terminator(&mut self, if intrinsic == Some("init") { "zeroed" } else { "uninitialized" } ); let msg_str = Symbol::intern(&str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); - let msg_file_line_col = consts::addr_of(bx.cx, - msg_file_line_col, - align, - Some("panic_loc")); + let msg_str = bx.cx().const_str_slice(msg_str); + let msg_file_line_col = bx.cx().const_struct( + &[msg_str, filename, line, col], + false, + ); + let msg_file_line_col = bx.cx().static_addr_of( + msg_file_line_col, + align, + Some("panic_loc"), + ); // Obtain the panic entry point. 
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = bx.cx().fn_type_of_instance(&instance); + let llfn = bx.cx().get_fn(instance); // Codegen the actual panic invoke/call. do_call( self, - bx, + &mut bx, fn_ty, llfn, &[msg_file_line_col], @@ -543,19 +570,17 @@ fn codegen_terminator(&mut self, // Prepare the return value destination let ret_dest = if let Some((ref dest, _)) = *destination { let is_intrinsic = intrinsic.is_some(); - self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs, + self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs, is_intrinsic) } else { ReturnDest::Nothing }; if intrinsic.is_some() && intrinsic != Some("drop_in_place") { - use intrinsic::codegen_intrinsic_call; - let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to()) + bx.cx().const_undef(bx.cx().type_ptr_to(bx.memory_ty(&fn_ty.ret))) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -589,7 +614,7 @@ fn codegen_terminator(&mut self, ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty), + layout: bx.cx().layout_of(ty), }; }, @@ -607,26 +632,26 @@ fn codegen_terminator(&mut self, ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty) + layout: bx.cx().layout_of(ty) }; } } } - self.codegen_operand(&bx, arg) + self.codegen_operand(&mut bx, arg) }).collect(); - let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx); - codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest, - terminator.source_info.span); + let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx()); + bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, + terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval); + self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval); } if let Some((_, target)) = *destination { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); } else { bx.unreachable(); } @@ -643,7 +668,7 @@ fn codegen_terminator(&mut self, }; 'make_args: for (i, arg) in first_args.iter().enumerate() { - let mut op = self.codegen_operand(&bx, arg); + let mut op = self.codegen_operand(&mut bx, arg); if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { if let Pair(..) 
= op.val { @@ -657,7 +682,7 @@ fn codegen_terminator(&mut self, && !op.layout.ty.is_region_ptr() { 'iter_fields: for i in 0..op.layout.fields.count() { - let field = op.extract_field(&bx, i); + let field = op.extract_field(&mut bx, i); if !field.layout.is_zst() { // we found the one non-zero-sized field that is allowed // now find *its* non-zero-sized field, or stop if it's a @@ -676,7 +701,7 @@ fn codegen_terminator(&mut self, match op.val { Pair(data_ptr, meta) => { llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&bx, meta, &fn_ty)); + .get_fn(&mut bx, meta, &fn_ty)); llargs.push(data_ptr); continue 'make_args } @@ -685,7 +710,7 @@ fn codegen_terminator(&mut self, } else if let Ref(data_ptr, Some(meta), _) = op.val { // by-value dynamic dispatch llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&bx, meta, &fn_ty)); + .get_fn(&mut bx, meta, &fn_ty)); llargs.push(data_ptr); continue; } else { @@ -698,27 +723,27 @@ fn codegen_terminator(&mut self, match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&bx, op.layout, "const"); - op.val.store(&bx, tmp); + let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); + op.val.store(&mut bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); } _ => {} } - self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]); + self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]); } if let Some(tup) = untuple { - self.codegen_arguments_untupled(&bx, tup, &mut llargs, + self.codegen_arguments_untupled(&mut bx, tup, &mut llargs, &fn_ty.args[first_args.len()..]) } let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, - (None, Some(instance)) => callee::get_fn(bx.cx, instance), + (None, Some(instance)) => bx.cx().get_fn(instance), _ => span_bug!(span, "no llfn for call"), }; - do_call(self, bx, fn_ty, fn_ptr, &llargs, + do_call(self, &mut bx, fn_ty, fn_ptr, &llargs, destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } @@ -729,14 +754,16 @@ fn codegen_terminator(&mut self, } } - fn codegen_argument(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - op: OperandRef<'ll, 'tcx>, - llargs: &mut Vec<&'ll Value>, - arg: &ArgType<'tcx, Ty<'tcx>>) { + fn codegen_argument( + &mut self, + bx: &mut Bx, + op: OperandRef<'tcx, Bx::Value>, + llargs: &mut Vec, + arg: &ArgType<'tcx, Ty<'tcx>> + ) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(C_undef(ty.llvm_type(bx.cx))); + llargs.push(bx.cx().const_undef(bx.cx().reg_backend_type(&ty))) } if arg.is_ignore() { @@ -796,8 +823,10 @@ fn codegen_argument(&mut self, if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. 
if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()), - align.min(arg.layout.align)); + let addr = bx.pointercast(llval, bx.cx().type_ptr_to( + bx.cx().cast_backend_type(&ty)) + ); + llval = bx.load(addr, align.min(arg.layout.align)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -818,11 +847,13 @@ fn codegen_argument(&mut self, llargs.push(llval); } - fn codegen_arguments_untupled(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - operand: &mir::Operand<'tcx>, - llargs: &mut Vec<&'ll Value>, - args: &[ArgType<'tcx, Ty<'tcx>>]) { + fn codegen_arguments_untupled( + &mut self, + bx: &mut Bx, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec, + args: &[ArgType<'tcx, Ty<'tcx>>] + ) { let tuple = self.codegen_operand(bx, operand); // Handle both by-ref and immediate tuples. @@ -830,7 +861,8 @@ fn codegen_arguments_untupled(&mut self, let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align); for i in 0..tuple.layout.fields.count() { let field_ptr = tuple_ptr.project_field(bx, i); - self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]); + let field = bx.load_operand(field_ptr); + self.codegen_argument(bx, field, llargs, &args[i]); } } else if let Ref(_, Some(_), _) = tuple.val { bug!("closure arguments must be sized") @@ -843,14 +875,17 @@ fn codegen_arguments_untupled(&mut self, } } - fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'ll, 'tcx> { - let cx = bx.cx; + fn get_personality_slot( + &mut self, + bx: &mut Bx + ) -> PlaceRef<'tcx, Bx::Value> { + let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { - let layout = cx.layout_of(cx.tcx.intern_tup(&[ - cx.tcx.mk_mut_ptr(cx.tcx.types.u8), - cx.tcx.types.i32 + let layout = cx.layout_of(cx.tcx().intern_tup(&[ + cx.tcx().mk_mut_ptr(cx.tcx().types.u8), + cx.tcx().types.i32 ])); let slot = PlaceRef::alloca(bx, layout, "personalityslot"); self.personality_slot = Some(slot); @@ -861,7 +896,10 @@ fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'ll, /// Return the landingpad wrapper around the given basic block /// /// No-op in MSVC SEH scheme. 
- fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> &'ll BasicBlock { + fn landing_pad_to( + &mut self, + target_bb: mir::BasicBlock + ) -> Bx::BasicBlock { if let Some(block) = self.landing_pads[target_bb] { return block; } @@ -872,54 +910,65 @@ fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> &'ll BasicBlock { landing_pad } - fn landing_pad_uncached(&mut self, target_bb: &'ll BasicBlock) -> &'ll BasicBlock { + fn landing_pad_uncached( + &mut self, + target_bb: Bx::BasicBlock + ) -> Bx::BasicBlock { if base::wants_msvc_seh(self.cx.sess()) { span_bug!(self.mir.span, "landing pad was not inserted?") } - let bx = self.new_block("cleanup"); + let mut bx = self.new_block("cleanup"); let llpersonality = self.cx.eh_personality(); let llretty = self.landing_pad_type(); let lp = bx.landing_pad(llretty, llpersonality, 1); bx.set_cleanup(lp); - let slot = self.get_personality_slot(&bx); - slot.storage_live(&bx); - Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot); + let slot = self.get_personality_slot(&mut bx); + slot.storage_live(&mut bx); + Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot); bx.br(target_bb); bx.llbb() } - fn landing_pad_type(&self) -> &'ll Type { + fn landing_pad_type(&self) -> Bx::Type { let cx = self.cx; - Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false) + cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false) } - fn unreachable_block(&mut self) -> &'ll BasicBlock { + fn unreachable_block( + &mut self + ) -> Bx::BasicBlock { self.unreachable_block.unwrap_or_else(|| { - let bl = self.new_block("unreachable"); - bl.unreachable(); - self.unreachable_block = Some(bl.llbb()); - bl.llbb() + let mut bx = self.new_block("unreachable"); + bx.unreachable(); + self.unreachable_block = Some(bx.llbb()); + bx.llbb() }) } - pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx> { - Builder::new_block(self.cx, self.llfn, name) + pub fn new_block(&self, name: &str) -> Bx { + Bx::new_block(self.cx, self.llfn, name) } - pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx> { - let bx = Builder::with_cx(self.cx); + pub fn build_block( + &self, + bb: mir::BasicBlock + ) -> Bx { + let mut bx = Bx::with_cx(self.cx); bx.position_at_end(self.blocks[bb]); bx } - fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>, - dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, - llargs: &mut Vec<&'ll Value>, is_intrinsic: bool) - -> ReturnDest<'ll, 'tcx> { + fn make_return_dest( + &mut self, + bx: &mut Bx, + dest: &mir::Place<'tcx>, + fn_ret: &ArgType<'tcx, Ty<'tcx>>, + llargs: &mut Vec, is_intrinsic: bool + ) -> ReturnDest<'tcx, Bx::Value> { // If the return is ignored, we can just return a do-nothing ReturnDest if fn_ret.is_ignore() { return ReturnDest::Nothing; @@ -973,20 +1022,23 @@ fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>, } } - fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>, - src: &mir::Operand<'tcx>, - dst: &mir::Place<'tcx>) { + fn codegen_transmute( + &mut self, + bx: &mut Bx, + src: &mir::Operand<'tcx>, + dst: &mir::Place<'tcx> + ) { if let mir::Place::Local(index) = *dst { match self.locals[index] { LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place), LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"), LocalRef::Operand(None) => { - let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst)); + let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst)); 
assert!(!dst_layout.ty.has_erasable_regions()); let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); place.storage_live(bx); self.codegen_transmute_into(bx, src, place); - let op = place.load(bx); + let op = bx.load_operand(place); place.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -1001,30 +1053,35 @@ fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>, } } - fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>, - src: &mir::Operand<'tcx>, - dst: PlaceRef<'ll, 'tcx>) { + fn codegen_transmute_into( + &mut self, + bx: &mut Bx, + src: &mir::Operand<'tcx>, + dst: PlaceRef<'tcx, Bx::Value> + ) { let src = self.codegen_operand(bx, src); - let llty = src.layout.llvm_type(bx.cx); - let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); + let llty = bx.cx().backend_type(src.layout); + let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty)); let align = src.layout.align.min(dst.layout.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); } // Stores the return value of a function call into it's final location. - fn store_return(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - dest: ReturnDest<'ll, 'tcx>, - ret_ty: &ArgType<'tcx, Ty<'tcx>>, - llval: &'ll Value) { + fn store_return( + &mut self, + bx: &mut Bx, + dest: ReturnDest<'tcx, Bx::Value>, + ret_ty: &ArgType<'tcx, Ty<'tcx>>, + llval: Bx::Value + ) { use self::ReturnDest::*; match dest { Nothing => (), - Store(dst) => ret_ty.store(bx, llval, dst), + Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst), IndirectOperand(tmp, index) => { - let op = tmp.load(bx); + let op = bx.load_operand(tmp); tmp.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -1033,8 +1090,8 @@ fn store_return(&mut self, let op = if let PassMode::Cast(_) = ret_ty.mode { let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bx); - ret_ty.store(bx, llval, tmp); - let op = tmp.load(bx); + bx.store_arg_ty(&ret_ty, llval, tmp); + let op = bx.load_operand(tmp); tmp.storage_dead(bx); op } else { @@ -1046,13 +1103,13 @@ fn store_return(&mut self, } } -enum ReturnDest<'ll, 'tcx> { +enum ReturnDest<'tcx, V> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer - Store(PlaceRef<'ll, 'tcx>), + Store(PlaceRef<'tcx, V>), // Stores an indirect return value to an operand local place - IndirectOperand(PlaceRef<'ll, 'tcx>, mir::Local), + IndirectOperand(PlaceRef<'tcx, V>, mir::Local), // Stores a direct return value to an operand local place DirectOperand(mir::Local) } diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs new file mode 100644 index 0000000000000000000000000000000000000000..568e1f0b38ab1c0fa0433b99eaa059d601dc76b9 --- /dev/null +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -0,0 +1,105 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
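
`ReturnDest` above is the routing decision for a call's return value, now generic over the backend value type `V` instead of LLVM's `&'ll Value`. A rough, runnable sketch of what the four variants mean in `store_return`, with made-up `Value`/`Place` stand-ins and index-based handles (the real code stores through `PlaceRef` and reloads with `load_operand`):

    // Illustrative stand-ins only: not the compiler's types.
    #[derive(Clone, Copy, Debug)]
    struct Value(i64);

    #[derive(Debug, Default)]
    struct Place { data: Option<i64> }

    #[allow(dead_code)]
    enum ReturnDest {
        Nothing,                       // return value ignored or passed indirectly
        Store(usize),                  // write straight into an existing place
        IndirectOperand(usize, usize), // spill into a temp place, reload into a local
        DirectOperand(usize),          // the local receives the value directly
    }

    fn store_return(
        dest: ReturnDest,
        ret: Value,
        places: &mut [Place],
        locals: &mut [Option<Value>],
    ) {
        match dest {
            ReturnDest::Nothing => {}
            ReturnDest::Store(p) => places[p].data = Some(ret.0),
            ReturnDest::IndirectOperand(p, l) => {
                // Store to the temporary, then load it back as an operand,
                // like `store_arg_ty` followed by `load_operand` above.
                places[p].data = Some(ret.0);
                locals[l] = places[p].data.map(Value);
            }
            ReturnDest::DirectOperand(l) => locals[l] = Some(ret),
        }
    }

    fn main() {
        let mut places = vec![Place::default()];
        let mut locals = vec![None];
        store_return(ReturnDest::DirectOperand(0), Value(42), &mut places, &mut locals);
        assert_eq!(locals[0].map(|v| v.0), Some(42));
    }

The `PassMode::Cast` path of the real `store_return`, which bounces the value through a `tmp_ret` alloca first, is omitted from this sketch.
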
+ +use rustc::mir::interpret::ErrorHandled; +use rustc_mir::const_eval::const_field; +use rustc::mir; +use rustc_data_structures::indexed_vec::Idx; +use rustc::mir::interpret::{GlobalId, ConstValue}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, LayoutOf}; +use syntax::source_map::Span; +use traits::*; + +use super::FunctionCx; + +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + fn fully_evaluate( + &mut self, + bx: &Bx, + constant: &'tcx ty::Const<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { + match constant.val { + ConstValue::Unevaluated(def_id, ref substs) => { + let tcx = bx.tcx(); + let param_env = ty::ParamEnv::reveal_all(); + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); + let cid = GlobalId { + instance, + promoted: None, + }; + tcx.const_eval(param_env.and(cid)) + }, + _ => Ok(constant), + } + } + + pub fn eval_mir_constant( + &mut self, + bx: &Bx, + constant: &mir::Constant<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { + let c = self.monomorphize(&constant.literal); + self.fully_evaluate(bx, c) + } + + /// process constant containing SIMD shuffle indices + pub fn simd_shuffle_indices( + &mut self, + bx: &Bx, + span: Span, + ty: Ty<'tcx>, + constant: Result<&'tcx ty::Const<'tcx>, ErrorHandled>, + ) -> (Bx::Value, Ty<'tcx>) { + constant + .and_then(|c| { + let field_ty = c.ty.builtin_index().unwrap(); + let fields = match c.ty.sty { + ty::Array(_, n) => n.unwrap_usize(bx.tcx()), + ref other => bug!("invalid simd shuffle type: {}", other), + }; + let values: Result, ErrorHandled> = (0..fields).map(|field| { + let field = const_field( + bx.tcx(), + ty::ParamEnv::reveal_all(), + self.instance, + None, + mir::Field::new(field as usize), + c, + )?; + if let Some(prim) = field.val.try_to_scalar() { + let layout = bx.cx().layout_of(field_ty); + let scalar = match layout.abi { + layout::Abi::Scalar(ref x) => x, + _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) + }; + Ok(bx.cx().scalar_to_backend( + prim, scalar, + bx.cx().immediate_backend_type(layout), + )) + } else { + bug!("simd shuffle field {:?}", field) + } + }).collect(); + let llval = bx.cx().const_struct(&values?, false); + Ok((llval, c.ty)) + }) + .unwrap_or_else(|_| { + bx.tcx().sess.span_err( + span, + "could not evaluate shuffle_indices at compile time", + ); + // We've errored, so we don't have to produce working code. + let ty = self.monomorphize(&ty); + let llty = bx.cx().backend_type(bx.cx().layout_of(ty)); + (bx.cx().const_undef(llty), ty) + }) + } +} diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs similarity index 78% rename from src/librustc_codegen_llvm/mir/mod.rs rename to src/librustc_codegen_ssa/mir/mod.rs index e5b25ea068b3b7fd463c7fabc7afd987a7cd3a0b..0579afe1d49c931459e1e929093d78f95b6239a0 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -8,24 +8,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
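
`simd_shuffle_indices` above const-evaluates every element of the shuffle-index array, collects the results so that one failing element poisons the whole constant, and only then builds the backend aggregate; on failure it emits a span error and falls back to `undef`. The same collect-then-build shape in miniature, with `eval_index` standing in for the per-field `const_field` evaluation:

    // Illustrative only: the real code produces a backend constant, not a Vec.
    fn eval_index(raw: &str) -> Result<i64, String> {
        raw.parse::<i64>()
            .map_err(|e| format!("shuffle index `{}` is not a constant: {}", raw, e))
    }

    fn shuffle_indices(raw: &[&str]) -> Vec<i64> {
        // Collecting into Result<Vec<_>, _> aborts at the first failing element,
        // mirroring how the evaluated fields are collected above.
        let evaluated: Result<Vec<i64>, String> =
            raw.iter().copied().map(eval_index).collect();
        match evaluated {
            Ok(values) => values,
            Err(msg) => {
                // The compiler reports a span error and substitutes `undef`;
                // the sketch just prints and falls back to an empty vector.
                eprintln!("error: {}", msg);
                Vec::new()
            }
        }
    }

    fn main() {
        assert_eq!(shuffle_indices(&["0", "3", "1"]), vec![0, 3, 1]);
        assert!(shuffle_indices(&["0", "x"]).is_empty());
    }
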
-use common::{C_i32, C_null}; use libc::c_uint; -use llvm::{self, BasicBlock}; -use llvm::debuginfo::DIScope; -use llvm_util; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; -use rustc::ty::layout::{LayoutOf, TyLayout}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::session::config::DebugInfo; use base; -use builder::Builder; -use common::{CodegenCx, Funclet}; -use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; -use monomorphize::Instance; -use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; -use type_::Type; -use value::Value; +use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; +use rustc_mir::monomorphize::Instance; +use rustc_target::abi::call::{FnType, PassMode}; +use traits::*; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -35,8 +28,6 @@ use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::IndexVec; -pub use self::constant::codegen_static_initializer; - use self::analyze::CleanupKind; use self::place::PlaceRef; use rustc::mir::traversal; @@ -44,16 +35,16 @@ use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { +pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, - debug_context: FunctionDebugContext<'ll>, + debug_context: FunctionDebugContext, - llfn: &'ll Value, + llfn: Bx::Value, - cx: &'a CodegenCx<'ll, 'tcx>, + cx: &'a Bx::CodegenCx, fn_ty: FnType<'tcx, Ty<'tcx>>, @@ -64,24 +55,24 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - personality_slot: Option>, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` - blocks: IndexVec, + blocks: IndexVec, /// The funclet status of each basic block cleanup_kinds: IndexVec, /// When targeting MSVC, this stores the cleanup info for each funclet /// BB. This is initialized as we compute the funclets' head block in RPO. - funclets: &'a IndexVec>>, + funclets: IndexVec>, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. - landing_pads: IndexVec>, + landing_pads: IndexVec>, /// Cached unreachable block - unreachable_block: Option<&'ll BasicBlock>, + unreachable_block: Option, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `PlaceRef` representing an alloca, but not always: @@ -98,32 +89,36 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// /// Avoiding allocs can also be important for certain intrinsics, /// notably `expect`. - locals: IndexVec>, + locals: IndexVec>, /// Debug information for MIR scopes. - scopes: IndexVec>, + scopes: IndexVec>, /// If this function is being monomorphized, this contains the type substitutions used. 
param_substs: &'tcx Substs<'tcx>, } -impl FunctionCx<'a, 'll, 'tcx> { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphize<T>(&self, value: &T) -> T where T: TypeFoldable<'tcx> { - self.cx.tcx.subst_and_normalize_erasing_regions( + self.cx.tcx().subst_and_normalize_erasing_regions( self.param_substs, ty::ParamEnv::reveal_all(), value, ) } - pub fn set_debug_loc(&mut self, bx: &Builder<'_, 'll, '_>, source_info: mir::SourceInfo) { + pub fn set_debug_loc( + &mut self, + bx: &mut Bx, + source_info: mir::SourceInfo + ) { let (scope, span) = self.debug_loc(source_info); - debuginfo::set_source_location(&self.debug_context, bx, scope, span); + bx.set_source_location(&self.debug_context, scope, span); } - pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) { + pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) { // Bail out if debug info emission is not enabled. match self.debug_context { FunctionDebugContext::DebugInfoDisabled | @@ -163,34 +158,38 @@ pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScop // corresponding to span's containing source scope. If so, we need to create a DIScope // "extension" into that file. fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos) - -> Option<&'ll DIScope> { + -> Option<Bx::DIScope> { let scope_metadata = self.scopes[scope_id].scope_metadata; if pos < self.scopes[scope_id].file_start_pos || pos >= self.scopes[scope_id].file_end_pos { - let cm = self.cx.sess().source_map(); + let sm = self.cx.sess().source_map(); let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate; - Some(debuginfo::extend_scope_to_file(self.cx, - scope_metadata.unwrap(), - &cm.lookup_char_pos(pos).file, - defining_crate)) + Some(self.cx.extend_scope_to_file( + scope_metadata.unwrap(), + &sm.lookup_char_pos(pos).file, + defining_crate + )) } else { scope_metadata } } } -enum LocalRef<'ll, 'tcx> { - Place(PlaceRef<'ll, 'tcx>), +enum LocalRef<'tcx, V> { + Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). /// `*p` is the fat pointer that references the actual unsized place. /// Every time it is initialized, we have to reallocate the place /// and update the fat pointer. That's the reason why it is indirect.
- UnsizedPlace(PlaceRef<'ll, 'tcx>), - Operand(Option<OperandRef<'ll, 'tcx>>), + UnsizedPlace(PlaceRef<'tcx, V>), + Operand(Option<OperandRef<'tcx, V>>), } -impl LocalRef<'ll, 'tcx> { - fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'ll, 'tcx> { +impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { + fn new_operand<Cx: CodegenMethods<'tcx, Value = V>>( + cx: &Cx, + layout: TyLayout<'tcx>, + ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -204,18 +203,18 @@ fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'l /////////////////////////////////////////////////////////////////////////// -pub fn codegen_mir( - cx: &'a CodegenCx<'ll, 'tcx>, - llfn: &'ll Value, +pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + cx: &'a Bx::CodegenCx, + llfn: Bx::Value, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, ) { - let fn_ty = FnType::new(cx, sig, &[]); + let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); let debug_context = - debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir); - let bx = Builder::new_block(cx, llfn, "start"); + cx.create_function_debug_context(instance, sig, llfn, mir); + let mut bx = Bx::new_block(cx, llfn, "start"); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); @@ -225,7 +224,7 @@ pub fn codegen_mir( // Allocate a `Block` for every basic block, except // the start block, if nothing loops back to it. let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty(); - let block_bxs: IndexVec<mir::BasicBlock, &'ll BasicBlock> = + let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK && !reentrant_start_block { bx.llbb() @@ -235,8 +234,8 @@ pub fn codegen_mir( }).collect(); // Compute debuginfo scopes from MIR scopes.
- let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context); - let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs); + let scopes = cx.create_mir_scopes(mir, &debug_context); + let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { instance, @@ -249,7 +248,7 @@ pub fn codegen_mir( unreachable_block: None, cleanup_kinds, landing_pads, - funclets: &funclets, + funclets, scopes, locals: IndexVec::new(), debug_context, @@ -263,37 +262,38 @@ pub fn codegen_mir( // Allocate variable and temp allocas fx.locals = { - let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals); + let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty)); + let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty)); assert!(!layout.ty.has_erasable_regions()); if let Some(name) = decl.name { // User variable let debug_scope = fx.scopes[decl.visibility_scope]; - let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full; + let dbg = debug_scope.is_valid() && + bx.cx().sess().opts.debuginfo == DebugInfo::Full; if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bx.cx, layout); + return LocalRef::new_operand(bx.cx(), layout); } debug!("alloc: {:?} ({}) -> place", local, name); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str()); + PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str()); // FIXME: add an appropriate debuginfo LocalRef::UnsizedPlace(indirect_place) } else { - let place = PlaceRef::alloca(&bx, layout, &name.as_str()); + let place = PlaceRef::alloca(&mut bx, layout, &name.as_str()); if dbg { let (scope, span) = fx.debug_loc(mir::SourceInfo { span: decl.source_info.span, scope: decl.visibility_scope, }); - declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(), + bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(), VariableAccess::DirectVariable { alloca: place.llval }, VariableKind::LocalVariable, span); } @@ -303,23 +303,26 @@ pub fn codegen_mir( // Temporary or return place if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = llvm::get_param(llfn, 0); + let llretptr = fx.cx.get_param(llfn, 0); LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { - let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local)); + let indirect_place = PlaceRef::alloca_unsized_indirect( + &mut bx, + layout, + &format!("{:?}", local), + ); LocalRef::UnsizedPlace(indirect_place) } else { - LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local))) + LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) } } else { // If this is an immediate local, we do not create an // alloca in advance. Instead we wait until we see the // definition and update the operand there. 
debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bx.cx, layout) + LocalRef::new_operand(bx.cx(), layout) } } }; @@ -357,27 +360,27 @@ pub fn codegen_mir( if !visited.contains(bb.index()) { debug!("codegen_mir: block {:?} was not visited", bb); unsafe { - llvm::LLVMDeleteBasicBlock(fx.blocks[bb]); + bx.delete_basic_block(fx.blocks[bb]); } } } } -fn create_funclets( +fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( mir: &'a Mir<'tcx>, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, cleanup_kinds: &IndexVec, - block_bxs: &IndexVec) - -> (IndexVec>, - IndexVec>>) + block_bxs: &IndexVec) + -> (IndexVec>, + IndexVec>) { block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { match *cleanup_kind { - CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {} + CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {} _ => return (None, None) } - let cleanup; + let funclet; let ret_llbb; match mir[bb].terminator.as_ref().map(|t| &t.kind) { // This is a basic block that we're aborting the program for, @@ -401,8 +404,8 @@ fn create_funclets( // bar(); // } Some(&mir::TerminatorKind::Abort) => { - let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); - let cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); + let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); + let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); ret_llbb = cs_bx.llbb(); let cs = cs_bx.catch_switch(None, None, 1); @@ -412,40 +415,43 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. - let null = C_null(Type::i8p(bx.cx)); - let sixty_four = C_i32(bx.cx, 64); - cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); + let null = bx.cx().const_null(bx.cx().type_i8p()); + let sixty_four = bx.cx().const_i32(64); + funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); } _ => { - let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); + let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); ret_llbb = cleanup_bx.llbb(); - cleanup = cleanup_bx.cleanup_pad(None, &[]); + funclet = cleanup_bx.cleanup_pad(None, &[]); cleanup_bx.br(llbb); } }; - (Some(ret_llbb), Some(Funclet::new(cleanup))) + (Some(ret_llbb), Some(funclet)) }).unzip() } /// Produce, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. -fn arg_local_refs( - bx: &Builder<'a, 'll, 'tcx>, - fx: &FunctionCx<'a, 'll, 'tcx>, - scopes: &IndexVec>, +fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + fx: &FunctionCx<'a, 'tcx, Bx>, + scopes: &IndexVec< + mir::SourceScope, + debuginfo::MirDebugScope + >, memory_locals: &BitSet, -) -> Vec> { +) -> Vec> { let mir = fx.mir; - let tcx = bx.tcx(); + let tcx = fx.cx.tcx(); let mut idx = 0; let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. 
let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE]; - let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full { + let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full { arg_scope.scope_metadata } else { None @@ -472,14 +478,15 @@ fn arg_local_refs( _ => bug!("spread argument isn't a tuple?!") }; - let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name); + let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_ty.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } - arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i)); + let pr_field = place.project_field(bx, i); + bx.store_fn_arg(arg, &mut llarg_idx, pr_field); } // Now that we have one alloca that contains the aggregate value, @@ -488,8 +495,7 @@ fn arg_local_refs( let variable_access = VariableAccess::DirectVariable { alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, @@ -515,21 +521,21 @@ fn arg_local_refs( let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore => { - return local(OperandRef::new_zst(bx.cx, arg.layout)); + return local(OperandRef::new_zst(bx.cx(), arg.layout)); } PassMode::Direct(_) => { - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -546,25 +552,25 @@ fn arg_local_refs( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; PlaceRef::new_sized(llarg, arg.layout, arg.layout.align) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; - let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name); - indirect_operand.store(&bx, tmp); + indirect_operand.store(bx, tmp); tmp } else { let tmp = PlaceRef::alloca(bx, arg.layout, &name); - arg.store_fn_arg(bx, &mut llarg_idx, tmp); + bx.store_fn_arg(arg, &mut llarg_idx, tmp); tmp }; arg_scope.map(|scope| { @@ -578,8 +584,7 @@ fn arg_local_refs( alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg.layout.ty, @@ -594,7 +599,7 @@ fn arg_local_refs( // Or is it the closure environment? 
let (closure_layout, env_ref) = match arg.layout.ty.sty { ty::RawPtr(ty::TypeAndMut { ty, .. }) | - ty::Ref(_, ty, _) => (bx.cx.layout_of(ty), true), + ty::Ref(_, ty, _) => (bx.cx().layout_of(ty), true), _ => (arg.layout, false) }; @@ -613,10 +618,10 @@ fn arg_local_refs( // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. // (cuviper) It seems to be fine without the alloca on LLVM 6 and later. - let env_alloca = !env_ref && llvm_util::get_major_version() < 6; + let env_alloca = !env_ref && bx.cx().closure_env_needs_indirect_debuginfo(); let env_ptr = if env_alloca { let scratch = PlaceRef::alloca(bx, - bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), + bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)), "__debuginfo_env_ptr"); bx.store(place.llval, scratch.llval, scratch.align); scratch.llval @@ -627,12 +632,7 @@ fn arg_local_refs( for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); - let ops = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref(), - llvm::LLVMRustDIBuilderCreateOpPlusUconst(), - byte_offset_of_var_in_env as i64, - llvm::LLVMRustDIBuilderCreateOpDeref()] - }; + let ops = bx.cx().debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env); // The environment and the capture can each be indirect. @@ -651,8 +651,7 @@ fn arg_local_refs( alloca: env_ptr, address_operations: &ops }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, decl.debug_name, ty, @@ -673,7 +672,7 @@ fn arg_local_refs( mod analyze; mod block; -mod constant; +pub mod constant; pub mod place; pub mod operand; mod rvalue; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs similarity index 69% rename from src/librustc_codegen_llvm/mir/operand.rs rename to src/librustc_codegen_ssa/mir/operand.rs index c76cbfcd9717711e244a7a3b6e1421be3dfd0e2c..d574d89d67e3216815f97384973bd589096feb5c 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -14,33 +14,30 @@ use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; use base; -use common::{CodegenCx, C_undef, C_usize}; -use builder::{Builder, MemFlags}; -use value::Value; -use type_of::LayoutLlvmExt; -use type_::Type; +use MemFlags; use glue; +use traits::*; + use std::fmt; use super::{FunctionCx, LocalRef}; -use super::constant::scalar_to_llvm; use super::place::PlaceRef; /// The representation of a Rust value. The enum variant is in fact /// uniquely determined by the value's type, but is kept as a /// safety check. #[derive(Copy, Clone, Debug)] -pub enum OperandValue<'ll> { +pub enum OperandValue { /// A reference to the actual operand. The data is guaranteed /// to be valid for the operand's lifetime. /// The second value, if any, is the extra data (vtable or length) /// which indicates that it refers to an unsized rvalue. - Ref(&'ll Value, Option<&'ll Value>, Align), + Ref(V, Option, Align), /// A single LLVM value. - Immediate(&'ll Value), + Immediate(V), /// A pair of immediate LLVM values. Used by fat pointers too. - Pair(&'ll Value, &'ll Value) + Pair(V, V) } /// An `OperandRef` is an "SSA" reference to a Rust value, along with @@ -52,37 +49,40 @@ pub enum OperandValue<'ll> { /// directly is sure to cause problems -- use `OperandRef::store` /// instead. #[derive(Copy, Clone)] -pub struct OperandRef<'ll, 'tcx> { +pub struct OperandRef<'tcx, V> { // The value. 
- pub val: OperandValue<'ll>, + pub val: OperandValue, // The layout of value, based on its Rust type. pub layout: TyLayout<'tcx>, } -impl fmt::Debug for OperandRef<'ll, 'tcx> { +impl fmt::Debug for OperandRef<'tcx, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } -impl OperandRef<'ll, 'tcx> { - pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>, - layout: TyLayout<'tcx>) -> OperandRef<'ll, 'tcx> { +impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { + pub fn new_zst>( + cx: &Cx, + layout: TyLayout<'tcx> + ) -> OperandRef<'tcx, V> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))), + val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(layout))), layout } } - pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, - val: &'tcx ty::Const<'tcx>) - -> Result, ErrorHandled> { - let layout = bx.cx.layout_of(val.ty); + pub fn from_const>( + bx: &mut Bx, + val: &'tcx ty::Const<'tcx> + ) -> Result { + let layout = bx.cx().layout_of(val.ty); if layout.is_zst() { - return Ok(OperandRef::new_zst(bx.cx, layout)); + return Ok(OperandRef::new_zst(bx.cx(), layout)); } let val = match val.val { @@ -92,11 +92,10 @@ pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = scalar_to_llvm( - bx.cx, + let llval = bx.cx().scalar_to_backend( x, scalar, - layout.immediate_llvm_type(bx.cx), + bx.cx().immediate_backend_type(layout), ); OperandValue::Immediate(llval) }, @@ -105,23 +104,20 @@ pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, layout::Abi::ScalarPair(ref a, ref b) => (a, b), _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = scalar_to_llvm( - bx.cx, + let a_llval = bx.cx().scalar_to_backend( a, a_scalar, - layout.scalar_pair_element_llvm_type(bx.cx, 0, true), + bx.cx().scalar_pair_element_backend_type(layout, 0, true), ); - let b_layout = layout.scalar_pair_element_llvm_type(bx.cx, 1, true); - let b_llval = scalar_to_llvm( - bx.cx, + let b_llval = bx.cx().scalar_to_backend( b, b_scalar, - b_layout, + bx.cx().scalar_pair_element_backend_type(layout, 1, true), ); OperandValue::Pair(a_llval, b_llval) }, ConstValue::ByRef(_, alloc, offset) => { - return Ok(PlaceRef::from_const_alloc(bx, layout, alloc, offset).load(bx)); + return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, offset))); }, }; @@ -133,14 +129,17 @@ pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, /// Asserts that this operand refers to a scalar and returns /// a reference to its value. - pub fn immediate(self) -> &'ll Value { + pub fn immediate(self) -> V { match self.val { OperandValue::Immediate(s) => s, _ => bug!("not immediate: {:?}", self) } } - pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> { + pub fn deref>( + self, + cx: &Cx + ) -> PlaceRef<'tcx, V> { let projected_ty = self.layout.ty.builtin_deref(true) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { @@ -159,15 +158,20 @@ pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> { /// If this operand is a `Pair`, we return an aggregate with the two values. /// For other cases, see `immediate`. 
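// OperandValue and OperandRef now take a type parameter V, the backend's value
// type, instead of hard-coding LLVM's &'ll Value. A stripped-down sketch of
// the resulting shape (V stands in for Bx::Value; the alignment is shortened
// to a plain u64 here):
#[derive(Copy, Clone, Debug)]
enum OperandValue<V> {
    /// In memory: pointer, optional extra data (vtable or length), alignment.
    Ref(V, Option<V>, u64),
    /// A single immediate value.
    Immediate(V),
    /// Two immediates; fat pointers and ScalarPair layouts use this.
    Pair(V, V),
}

fn describe<V: std::fmt::Debug>(val: OperandValue<V>) -> String {
    match val {
        OperandValue::Ref(ptr, extra, align) => {
            format!("in memory at {:?} (extra {:?}, align {})", ptr, extra, align)
        }
        OperandValue::Immediate(v) => format!("immediate {:?}", v),
        OperandValue::Pair(a, b) => format!("pair ({:?}, {:?})", a, b),
    }
}

fn main() {
    // With a toy backend whose values are plain integers:
    println!("{}", describe(OperandValue::Pair(1u32, 2u32)));
    println!("{}", describe(OperandValue::<u32>::Ref(0x1000, None, 8)));
}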
- pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value { + pub fn immediate_or_packed_pair>( + self, + bx: &mut Bx + ) -> V { if let OperandValue::Pair(a, b) = self.val { - let llty = self.layout.llvm_type(bx.cx); + let llty = bx.cx().backend_type(self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. - let mut llpair = C_undef(llty); - llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); - llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); + let mut llpair = bx.cx().const_undef(llty); + let imm_a = base::from_immediate(bx, a); + let imm_b = base::from_immediate(bx, b); + llpair = bx.insert_value(llpair, imm_a, 0); + llpair = bx.insert_value(llpair, imm_b, 1); llpair } else { self.immediate() @@ -175,17 +179,20 @@ pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value } /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. - pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>, - llval: &'ll Value, - layout: TyLayout<'tcx>) - -> OperandRef<'ll, 'tcx> { + pub fn from_immediate_or_packed_pair>( + bx: &mut Bx, + llval: V, + layout: TyLayout<'tcx> + ) -> Self { let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); // Deconstruct the immediate aggregate. - let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a); - let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b); + let a_llval = bx.extract_value(llval, 0); + let a_llval = base::to_immediate_scalar(bx, a_llval, a); + let b_llval = bx.extract_value(llval, 1); + let b_llval = base::to_immediate_scalar(bx, b_llval, b); OperandValue::Pair(a_llval, b_llval) } else { OperandValue::Immediate(llval) @@ -193,14 +200,18 @@ pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>, OperandRef { val, layout } } - pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'ll, 'tcx> { - let field = self.layout.field(bx.cx, i); + pub fn extract_field>( + &self, + bx: &mut Bx, + i: usize + ) -> Self { + let field = self.layout.field(bx.cx(), i); let offset = self.layout.fields.offset(i); let mut val = match (self.val, &self.layout.abi) { // If the field is ZST, it has no data. _ if field.is_zst() => { - return OperandRef::new_zst(bx.cx, field); + return OperandRef::new_zst(bx.cx(), field); } // Newtype of a scalar, scalar pair or vector. @@ -213,12 +224,12 @@ pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef // Extract a scalar component from a pair. (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { if offset.bytes() == 0 { - assert_eq!(field.size, a.value.size(bx.cx)); + assert_eq!(field.size, a.value.size(bx.cx())); OperandValue::Immediate(a_llval) } else { - assert_eq!(offset, a.value.size(bx.cx) - .abi_align(b.value.align(bx.cx))); - assert_eq!(field.size, b.value.size(bx.cx)); + assert_eq!(offset, a.value.size(bx.cx()) + .abi_align(b.value.align(bx.cx()))); + assert_eq!(field.size, b.value.size(bx.cx())); OperandValue::Immediate(b_llval) } } @@ -226,7 +237,7 @@ pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. 
}) => { OperandValue::Immediate( - bx.extract_element(llval, C_usize(bx.cx, i as u64))) + bx.extract_element(llval, bx.cx().const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -235,11 +246,11 @@ pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. match val { OperandValue::Immediate(ref mut llval) => { - *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx)); + *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field)); } OperandValue::Pair(ref mut a, ref mut b) => { - *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true)); - *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true)); + *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true)); + *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true)); } OperandValue::Ref(..) => bug!() } @@ -251,27 +262,43 @@ pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef } } -impl OperandValue<'ll> { - pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { +impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue { + pub fn store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V> + ) { self.store_with_flags(bx, dest, MemFlags::empty()); } - pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn volatile_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V> + ) { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } - pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn unaligned_volatile_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V>, + ) { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } - pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn nontemporal_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, V> + ) { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } - fn store_with_flags( + fn store_with_flags>( self, - bx: &Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, + bx: &mut Bx, + dest: PlaceRef<'tcx, V>, flags: MemFlags, ) { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); @@ -301,8 +328,11 @@ fn store_with_flags( } } } - - pub fn store_unsized(self, bx: &Builder<'a, 'll, 'tcx>, indirect_dest: PlaceRef<'ll, 'tcx>) { + pub fn store_unsized>( + self, + bx: &mut Bx, + indirect_dest: PlaceRef<'tcx, V> + ) { debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); let flags = MemFlags::empty(); @@ -322,22 +352,22 @@ pub fn store_unsized(self, bx: &Builder<'a, 'll, 'tcx>, indirect_dest: PlaceRef< let min_align = Align::from_bits(8, 8).unwrap(); // Allocate an appropriate region on the stack, and copy the value into it - let (llsize, _) = glue::size_and_align_of_dst(&bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align); - base::call_memcpy(&bx, lldst, max_align, llptr, min_align, llsize, flags); + let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); + let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); + bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); // Store the allocated region and the extra to the indirect place. 
let indirect_operand = OperandValue::Pair(lldst, llextra); - indirect_operand.store(&bx, indirect_dest); + indirect_operand.store(bx, indirect_dest); } } -impl FunctionCx<'a, 'll, 'tcx> { - fn maybe_codegen_consume_direct(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> Option> - { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + fn maybe_codegen_consume_direct( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> Option> { debug!("maybe_codegen_consume_direct(place={:?})", place); // watch out for locals that do not have an @@ -368,9 +398,9 @@ fn maybe_codegen_consume_direct(&mut self, // ZSTs don't require any actual memory access. // FIXME(eddyb) deduplicate this with the identical // checks in `codegen_consume` and `extract_field`. - let elem = o.layout.field(bx.cx, 0); + let elem = o.layout.field(bx.cx(), 0); if elem.is_zst() { - return Some(OperandRef::new_zst(bx.cx, elem)); + return Some(OperandRef::new_zst(bx.cx(), elem)); } } _ => {} @@ -381,19 +411,19 @@ fn maybe_codegen_consume_direct(&mut self, None } - pub fn codegen_consume(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> OperandRef<'ll, 'tcx> - { + pub fn codegen_consume( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> OperandRef<'tcx, Bx::Value> { debug!("codegen_consume(place={:?})", place); let ty = self.monomorphized_place_ty(place); - let layout = bx.cx.layout_of(ty); + let layout = bx.cx().layout_of(ty); // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx.cx, layout); + return OperandRef::new_zst(bx.cx(), layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { @@ -402,14 +432,15 @@ pub fn codegen_consume(&mut self, // for most places, to consume them we just load them // out from their home - self.codegen_place(bx, place).load(bx) + let place = self.codegen_place(bx, place); + bx.load_operand(place) } - pub fn codegen_operand(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - operand: &mir::Operand<'tcx>) - -> OperandRef<'ll, 'tcx> - { + pub fn codegen_operand( + &mut self, + bx: &mut Bx, + operand: &mir::Operand<'tcx> + ) -> OperandRef<'tcx, Bx::Value> { debug!("codegen_operand(operand={:?})", operand); match *operand { @@ -432,15 +463,15 @@ pub fn codegen_operand(&mut self, } // Allow RalfJ to sleep soundly knowing that even refactorings that remove // the above error (or silence it under some conditions) will not cause UB - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); // We've errored, so we don't have to produce working code. - let layout = bx.cx.layout_of(ty); - PlaceRef::new_sized( - C_undef(layout.llvm_type(bx.cx).ptr_to()), + let layout = bx.cx().layout_of(ty); + bx.load_operand(PlaceRef::new_sized( + bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))), layout, layout.align, - ).load(bx) + )) }) } } diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs similarity index 59% rename from src/librustc_codegen_llvm/mir/place.rs rename to src/librustc_codegen_ssa/mir/place.rs index 75ec5ead243f9a00bf4f70fbea73189d80358c74..5b36ee8fd183c8ab49b6e8ff59f503bb2a5929a6 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -8,31 +8,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use llvm::{self, LLVMConstInBoundsGEP}; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use base; -use builder::Builder; -use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big}; -use consts; -use type_of::LayoutLlvmExt; -use type_::Type; -use value::Value; +use MemFlags; +use common::IntPredicate; use glue; -use mir::constant::const_alloc_to_llvm; + +use traits::*; use super::{FunctionCx, LocalRef}; -use super::operand::{OperandRef, OperandValue}; +use super::operand::OperandValue; #[derive(Copy, Clone, Debug)] -pub struct PlaceRef<'ll, 'tcx> { +pub struct PlaceRef<'tcx, V> { /// Pointer to the contents of the place - pub llval: &'ll Value, + pub llval: V, /// This place's extra data if it is unsized, or null - pub llextra: Option<&'ll Value>, + pub llextra: Option, /// Monomorphized type of this place, including variant information pub layout: TyLayout<'tcx>, @@ -41,12 +36,12 @@ pub struct PlaceRef<'ll, 'tcx> { pub align: Align, } -impl PlaceRef<'ll, 'tcx> { +impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { pub fn new_sized( - llval: &'ll Value, + llval: V, layout: TyLayout<'tcx>, align: Align, - ) -> PlaceRef<'ll, 'tcx> { + ) -> PlaceRef<'tcx, V> { assert!(!layout.is_unsized()); PlaceRef { llval, @@ -56,140 +51,73 @@ pub fn new_sized( } } - pub fn from_const_alloc( - bx: &Builder<'a, 'll, 'tcx>, + pub fn alloca>( + bx: &mut Bx, layout: TyLayout<'tcx>, - alloc: &mir::interpret::Allocation, - offset: Size, - ) -> PlaceRef<'ll, 'tcx> { - let init = const_alloc_to_llvm(bx.cx, alloc); - let base_addr = consts::addr_of(bx.cx, init, layout.align, None); - - let llval = unsafe { LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(bx.cx)), - &C_usize(bx.cx, offset.bytes()), - 1, - )}; - let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to()); - PlaceRef::new_sized(llval, layout, alloc.align) - } - - pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + name: &str + ) -> Self { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align); + let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align); Self::new_sized(tmp, layout, layout.align) } /// Returns a place for an indirect reference to an unsized place. - pub fn alloca_unsized_indirect(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + pub fn alloca_unsized_indirect>( + bx: &mut Bx, + layout: TyLayout<'tcx>, + name: &str, + ) -> Self { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); - let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty); - let ptr_layout = bx.cx.layout_of(ptr_ty); + let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); + let ptr_layout = bx.cx().layout_of(ptr_ty); Self::alloca(bx, ptr_layout, name) } - pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value { + pub fn len>( + &self, + cx: &Cx + ) -> V { if let layout::FieldPlacement::Array { count, .. 
} = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); self.llextra.unwrap() } else { - C_usize(cx, count) + cx.const_usize(count) } } else { bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) } } - pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> { - debug!("PlaceRef::load: {:?}", self); - - assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); - - if self.layout.is_zst() { - return OperandRef::new_zst(bx.cx, self.layout); - } - - let scalar_load_metadata = |load, scalar: &layout::Scalar| { - let vr = scalar.valid_range.clone(); - match scalar.value { - layout::Int(..) => { - let range = scalar.valid_range_exclusive(bx.cx); - if range.start != range.end { - bx.range_metadata(load, range); - } - } - layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { - bx.nonnull_metadata(load); - } - _ => {} - } - }; - - let val = if let Some(llextra) = self.llextra { - OperandValue::Ref(self.llval, Some(llextra), self.align) - } else if self.layout.is_llvm_immediate() { - let mut const_llval = None; - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - const_llval = llvm::LLVMGetInitializer(global); - } - } - } - let llval = const_llval.unwrap_or_else(|| { - let load = bx.load(self.llval, self.align); - if let layout::Abi::Scalar(ref scalar) = self.layout.abi { - scalar_load_metadata(load, scalar); - } - load - }); - OperandValue::Immediate(base::to_immediate(bx, llval, self.layout)) - } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { - let load = |i, scalar: &layout::Scalar| { - let llptr = bx.struct_gep(self.llval, i as u64); - let load = bx.load(llptr, self.align); - scalar_load_metadata(load, scalar); - if scalar.is_bool() { - bx.trunc(load, Type::i1(bx.cx)) - } else { - load - } - }; - OperandValue::Pair(load(0, a), load(1, b)) - } else { - OperandValue::Ref(self.llval, None, self.align) - }; - - OperandRef { val, layout: self.layout } - } +} +impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { /// Access a field, at a point when the value's case is known. - pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'ll, 'tcx> { - let cx = bx.cx; - let field = self.layout.field(cx, ix); + pub fn project_field>( + self, bx: &mut Bx, + ix: usize, + ) -> Self { + let field = self.layout.field(bx.cx(), ix); let offset = self.layout.fields.offset(ix); let effective_field_align = self.align.restrict_for_offset(offset); - let simple = || { + let mut simple = || { // Unions and newtypes only use an offset of 0. let llval = if offset.bytes() == 0 { self.llval } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { // Offsets have to match either first or second field. - assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx))); + assert_eq!(offset, a.value.size(bx.cx()).abi_align(b.value.align(bx.cx()))); bx.struct_gep(self.llval, 1) } else { - bx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)) }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. 
- llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()), - llextra: if cx.type_has_metadata(field.ty) { + llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))), + llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None @@ -239,7 +167,7 @@ pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<' let meta = self.llextra; - let unaligned_offset = C_usize(cx, offset.bytes()); + let unaligned_offset = bx.cx().const_usize(offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -250,22 +178,23 @@ pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<' // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64)); - let offset = bx.and(bx.add(unaligned_offset, align_sub_1), - bx.neg(unsized_align)); + let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64)); + let and_lhs = bx.add(unaligned_offset, align_sub_1); + let and_rhs = bx.neg(unsized_align); + let offset = bx.and(and_lhs, and_rhs); debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer - let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx)); + let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p()); let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = field.llvm_type(cx); + let ll_fty = bx.cx().backend_type(field); debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()), + llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)), llextra: self.llextra, layout: field, align: effective_field_align, @@ -273,24 +202,28 @@ pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<' } /// Obtain the actual discriminant of a value. - pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> &'ll Value { - let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); + pub fn codegen_get_discr>( + self, + bx: &mut Bx, + cast_to: Ty<'tcx> + ) -> V { + let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to)); if self.layout.abi.is_uninhabited() { - return C_undef(cast_to); + return bx.cx().const_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index.as_u32() as u128, - |def| def.discriminant_for_variant(bx.cx.tcx, index).val); - return C_uint_big(cast_to, discr_val); + |def| def.discriminant_for_variant(bx.cx().tcx(), index).val); + return bx.cx().const_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, } let discr = self.project_field(bx, 0); - let lldiscr = discr.load(bx).immediate(); + let lldiscr = bx.load_operand(discr).immediate(); match self.layout.variants { layout::Variants::Single { .. } => bug!(), layout::Variants::Tagged { ref tag, .. } => { @@ -310,26 +243,30 @@ pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) - niche_start, .. } => { - let niche_llty = discr.layout.immediate_llvm_type(bx.cx); + let niche_llty = bx.cx().immediate_backend_type(discr.layout); if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. 
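// The unsized-field path above computes the field's dynamic offset as
// (unaligned offset + (align - 1)) & -align. The same arithmetic on plain
// integers (the mask trick is valid only for power-of-two alignments):
fn align_up(unaligned_offset: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    (unaligned_offset + (align - 1)) & align.wrapping_neg()
}

fn main() {
    assert_eq!(align_up(13, 4), 16); // round up to the next multiple of 4
    assert_eq!(align_up(16, 4), 16); // already-aligned offsets are unchanged
    assert_eq!(align_up(1, 8), 8);
}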
- C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. + bx.cx().const_null(niche_llty) } else { - C_uint_big(niche_llty, niche_start) + bx.cx().const_uint_big(niche_llty, niche_start) }; - bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval), - C_uint(cast_to, niche_variants.start().as_u32() as u64), - C_uint(cast_to, dataful_variant.as_u32() as u64)) + let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval); + bx.select(select_arg, + bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64), + bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) } else { // Rebase from niche values to discriminant values. let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128); - let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta)); - let lldiscr_max = C_uint(niche_llty, niche_variants.end().as_u32() as u64); - bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max), - bx.intcast(lldiscr, cast_to, false), - C_uint(cast_to, dataful_variant.as_u32() as u64)) + let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta)); + let lldiscr_max = + bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64); + let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max); + let cast = bx.intcast(lldiscr, cast_to, false); + bx.select(select_arg, + cast, + bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64)) } } } @@ -337,8 +274,12 @@ pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) - /// Set the discriminant for a new value of the given case of the given /// representation. - pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) { - if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() { + pub fn codegen_set_discr>( + &self, + bx: &mut Bx, + variant_index: VariantIdx + ) { + if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { return; } match self.layout.variants { @@ -351,7 +292,7 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - C_uint_big(ptr.layout.llvm_type(bx.cx), to), + bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to), ptr.llval, ptr.align); } @@ -362,29 +303,27 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari .. } => { if variant_index != dataful_variant { - if bx.sess().target.target.arch == "arm" || - bx.sess().target.target.arch == "aarch64" { + if bx.cx().sess().target.target.arch == "arm" || + bx.cx().sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. 
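// codegen_get_discr above decodes a niche-encoded discriminant: the stored tag
// is rebased so that the first niche variant lines up with niche_start, and a
// tag outside the niche range denotes the dataful variant; codegen_set_discr
// below performs the inverse mapping before the store. The same arithmetic in
// plain Rust (a sketch on integers; the compiler works on layout types):
fn decode_niche_discr(
    tag: u128,
    niche_start: u128,
    niche_variants: std::ops::RangeInclusive<u32>,
    dataful_variant: u32,
) -> u32 {
    // Rebase from niche values to variant indices (wrapping, as in the IR).
    let delta = niche_start.wrapping_sub(*niche_variants.start() as u128);
    let relative = tag.wrapping_sub(delta);
    if relative <= *niche_variants.end() as u128 {
        relative as u32
    } else {
        dataful_variant
    }
}

fn encode_niche_discr(variant_index: u32, first_niche_variant: u32, niche_start: u128) -> u128 {
    ((variant_index - first_niche_variant) as u128).wrapping_add(niche_start)
}

fn main() {
    // Option<&T>: None (variant 0) is the niche, stored as the null pointer;
    // Some (variant 1) is the dataful variant.
    assert_eq!(decode_niche_discr(0, 0, 0..=0, 1), 0); // null pointer -> None
    assert_eq!(decode_niche_discr(0x1000, 0, 0..=0, 1), 1); // anything else -> Some
    assert_eq!(encode_niche_discr(0, 0, 0), 0); // storing None writes null back
}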
- let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to()); - let fill_byte = C_u8(bx.cx, 0); + let fill_byte = bx.cx().const_u8(0); let (size, align) = self.layout.size_and_align(); - let size = C_usize(bx.cx, size.bytes()); - let align = C_u32(bx.cx, align.abi() as u32); - base::call_memset(bx, llptr, fill_byte, size, align, false); + let size = bx.cx().const_usize(size.bytes()); + bx.memset(self.llval, fill_byte, size, align, MemFlags::empty()); } let niche = self.project_field(bx, 0); - let niche_llty = niche.layout.immediate_llvm_type(bx.cx); + let niche_llty = bx.cx().immediate_backend_type(niche.layout); let niche_value = variant_index.as_u32() - niche_variants.start().as_u32(); let niche_value = (niche_value as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. + bx.cx().const_null(niche_llty) } else { - C_uint_big(niche_llty, niche_value) + bx.cx().const_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } @@ -392,46 +331,53 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari } } - pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value) - -> PlaceRef<'ll, 'tcx> { + pub fn project_index>( + &self, + bx: &mut Bx, + llindex: V + ) -> Self { PlaceRef { - llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]), + llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), llextra: None, - layout: self.layout.field(bx.cx, 0), + layout: self.layout.field(bx.cx(), 0), align: self.align } } - pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) - -> PlaceRef<'ll, 'tcx> { + pub fn project_downcast>( + &self, + bx: &mut Bx, + variant_index: VariantIdx + ) -> Self { let mut downcast = *self; - downcast.layout = self.layout.for_variant(bx.cx, variant_index); + downcast.layout = self.layout.for_variant(bx.cx(), variant_index); // Cast to the appropriate variant struct type. - let variant_ty = downcast.layout.llvm_type(bx.cx); - downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to()); + let variant_ty = bx.cx().backend_type(downcast.layout); + downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); downcast } - pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) { + pub fn storage_live>(&self, bx: &mut Bx) { bx.lifetime_start(self.llval, self.layout.size); } - pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) { + pub fn storage_dead>(&self, bx: &mut Bx) { bx.lifetime_end(self.llval, self.layout.size); } } -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_place(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> PlaceRef<'ll, 'tcx> { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_place( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> PlaceRef<'tcx, Bx::Value> { debug!("codegen_place(place={:?})", place); - let cx = bx.cx; - let tcx = cx.tcx; + let cx = self.cx; + let tcx = self.cx.tcx(); if let mir::Place::Local(index) = *place { match self.locals[index] { @@ -439,7 +385,7 @@ pub fn codegen_place(&mut self, return place; } LocalRef::UnsizedPlace(place) => { - return place.load(bx).deref(&cx); + return bx.load_operand(place).deref(cx); } LocalRef::Operand(..) 
=> { bug!("using operand local {:?} as place", place); @@ -459,7 +405,7 @@ pub fn codegen_place(&mut self, match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { mir::interpret::ConstValue::ByRef(_, alloc, offset) => { - PlaceRef::from_const_alloc(bx, layout, alloc, offset) + bx.cx().from_const_alloc(layout, alloc, offset) } _ => bug!("promoteds should have an allocation: {:?}", val), }, @@ -468,23 +414,25 @@ pub fn codegen_place(&mut self, // and compile-time agree on values // With floats that won't always be true // so we generate an abort - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = C_undef(layout.llvm_type(bx.cx).ptr_to()); + let llval = bx.cx().const_undef( + bx.cx().type_ptr_to(bx.cx().backend_type(layout)) + ); PlaceRef::new_sized(llval, layout, layout.align) } } } mir::Place::Static(box mir::Static { def_id, ty }) => { let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align) + PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align) }, mir::Place::Projection(box mir::Projection { ref base, elem: mir::ProjectionElem::Deref }) => { // Load the pointer from its location. - self.codegen_consume(bx, base).deref(bx.cx) + self.codegen_consume(bx, base).deref(bx.cx()) } mir::Place::Projection(ref projection) => { let cg_base = self.codegen_place(bx, &projection.base); @@ -503,34 +451,33 @@ pub fn codegen_place(&mut self, mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); + let lloffset = bx.cx().const_usize(offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); - let lllen = cg_base.len(bx.cx); + let lloffset = bx.cx().const_usize(offset as u64); + let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - C_usize(bx.cx, from as u64)); + bx.cx().const_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } - .projection_ty(tcx, &projection.elem) - .to_ty(bx.tcx()); - subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty)); + .projection_ty(tcx, &projection.elem).to_ty(tcx); + subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - C_usize(bx.cx, (from as u64) + (to as u64)))); + bx.cx().const_usize((from as u64) + (to as u64)))); } // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). 
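// The ConstantIndex { from_end: true } arm above computes the element index as
// len - offset at run time (slice patterns with a rest pattern, for example
// `[.., last]`, produce such projections). In plain Rust terms:
fn project_from_end<T>(slice: &[T], offset: usize) -> &T {
    &slice[slice.len() - offset]
}

fn main() {
    let xs = [10, 20, 30, 40];
    assert_eq!(*project_from_end(&xs, 1), 40); // offset 1 from the end
    assert_eq!(*project_from_end(&xs, 2), 30);
}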
subslice.llval = bx.pointercast(subslice.llval, - subslice.layout.llvm_type(bx.cx).ptr_to()); + bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout))); subslice } @@ -545,7 +492,7 @@ pub fn codegen_place(&mut self, } pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { - let tcx = self.cx.tcx; + let tcx = self.cx.tcx(); let place_ty = place.ty(self.mir, tcx); self.monomorphize(&place_ty.to_ty(tcx)) } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs similarity index 71% rename from src/librustc_codegen_llvm/mir/rvalue.rs rename to src/librustc_codegen_ssa/mir/rvalue.rs index fa22bdff94dddbeb29fda3c9a0295e6c2100ea8b..6b1efa060fdad8a40d6f6b92f51c57c850e4e44a 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -8,57 +8,53 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; use std::{u128, i128}; use base; -use builder::Builder; +use MemFlags; use callee; -use common::{self, val_ty}; -use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, C_uint, C_uint_big}; -use consts; -use monomorphize; -use type_::Type; -use type_of::LayoutLlvmExt; -use value::Value; +use common::{self, RealPredicate, IntPredicate}; +use rustc_mir::monomorphize; + +use traits::*; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_rvalue(&mut self, - bx: Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> - { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_rvalue( + &mut self, + mut bx: Bx, + dest: PlaceRef<'tcx, Bx::Value>, + rvalue: &mir::Rvalue<'tcx> + ) -> Bx { debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); + let cg_operand = self.codegen_operand(&mut bx, operand); // FIXME: consider not copying constants through stack. (fixable by codegenning // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - cg_operand.val.store(&bx, dest); + cg_operand.val.store(&mut bx, dest); bx } mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { // The destination necessarily contains a fat pointer, so if // it's a scalar pair, it's a fat pointer or newtype thereof. - if dest.layout.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(dest.layout) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); return bx; } @@ -66,7 +62,7 @@ pub fn codegen_rvalue(&mut self, // this to be eliminated by MIR building, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. - let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); match operand.val { OperandValue::Pair(..) 
| OperandValue::Immediate(_) => { @@ -77,15 +73,15 @@ pub fn codegen_rvalue(&mut self, // index into the struct, and this case isn't // important enough for it. debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp"); - scratch.storage_live(&bx); - operand.val.store(&bx, scratch); - base::coerce_unsized_into(&bx, scratch, dest); - scratch.storage_dead(&bx); + let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp"); + scratch.storage_live(&mut bx); + operand.val.store(&mut bx, scratch); + base::coerce_unsized_into(&mut bx, scratch, dest); + scratch.storage_dead(&mut bx); } OperandValue::Ref(llref, None, align) => { let source = PlaceRef::new_sized(llref, operand.layout, align); - base::coerce_unsized_into(&bx, source, dest); + base::coerce_unsized_into(&mut bx, source, dest); } OperandValue::Ref(_, Some(_), _) => { bug!("unsized coercion on an unsized rvalue") @@ -95,51 +91,50 @@ pub fn codegen_rvalue(&mut self, } mir::Rvalue::Repeat(ref elem, count) => { - let cg_elem = self.codegen_operand(&bx, elem); + let cg_elem = self.codegen_operand(&mut bx, elem); // Do not generate the loop for zero-sized elements or empty arrays. if dest.layout.is_zst() { return bx; } - - let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval; + let zero = bx.cx().const_usize(0); + let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let align = C_i32(bx.cx, dest.align.abi() as i32); - let size = C_usize(bx.cx, dest.layout.size.bytes()); + let size = bx.cx().const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays - if common::is_const_integral(v) && common::const_to_uint(v) == 0 { - let fill = C_u8(bx.cx, 0); - base::call_memset(&bx, start, fill, size, align, false); + if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { + let fill = bx.cx().const_u8(0); + bx.memset(start, fill, size, dest.align, MemFlags::empty()); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays - let v = base::from_immediate(&bx, v); - if common::val_ty(v) == Type::i8(bx.cx) { - base::call_memset(&bx, start, v, size, align, false); + let v = base::from_immediate(&mut bx, v); + if bx.cx().val_ty(v) == bx.cx().type_i8() { + bx.memset(start, v, size, dest.align, MemFlags::empty()); return bx; } } - let count = C_usize(bx.cx, count); - let end = dest.project_index(&bx, count).llval; + let count = bx.cx().const_usize(count); + let end = dest.project_index(&mut bx, count).llval; - let header_bx = bx.build_sibling_block("repeat_loop_header"); - let body_bx = bx.build_sibling_block("repeat_loop_body"); + let mut header_bx = bx.build_sibling_block("repeat_loop_header"); + let mut body_bx = bx.build_sibling_block("repeat_loop_body"); let next_bx = bx.build_sibling_block("repeat_loop_next"); bx.br(header_bx.llbb()); - let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]); + let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]); - let keep_going = header_bx.icmp(llvm::IntNE, current, end); + let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - cg_elem.val.store(&body_bx, + cg_elem.val.store(&mut body_bx, PlaceRef::new_sized(current, cg_elem.layout, dest.align)); - let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]); + let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); body_bx.br(header_bx.llbb()); 
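// The Repeat lowering above has two memset fast paths (all-zero constants and
// single-byte elements); everything else goes through the
// repeat_loop_header/body/next blocks, whose control flow corresponds roughly
// to this Rust sketch:
fn repeat_fill<T: Copy>(dest: &mut [T], elem: T) {
    let count = dest.len();
    let mut current = 0;        // the phi node in repeat_loop_header
    while current != count {    // icmp ne current, end
        dest[current] = elem;   // the store in repeat_loop_body
        current += 1;           // inbounds_gep current, 1
    }                           // falls through to repeat_loop_next
}

fn main() {
    let mut a = [0u32; 4];
    repeat_fill(&mut a, 7);
    assert_eq!(a, [7, 7, 7, 7]);
}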
header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -149,9 +144,9 @@ pub fn codegen_rvalue(&mut self, mir::Rvalue::Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => { - dest.codegen_set_discr(&bx, variant_index); + dest.codegen_set_discr(&mut bx, variant_index); if adt_def.is_enum() { - (dest.project_downcast(&bx, variant_index), active_field_index) + (dest.project_downcast(&mut bx, variant_index), active_field_index) } else { (dest, active_field_index) } @@ -159,11 +154,12 @@ pub fn codegen_rvalue(&mut self, _ => (dest, None) }; for (i, operand) in operands.iter().enumerate() { - let op = self.codegen_operand(&bx, operand); + let op = self.codegen_operand(&mut bx, operand); // Do not generate stores and GEPis for zero-sized fields. if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - op.val.store(&bx, dest.project_field(&bx, field_index)); + let field = dest.project_field(&mut bx, field_index); + op.val.store(&mut bx, field); } } bx @@ -171,26 +167,26 @@ pub fn codegen_rvalue(&mut self, _ => { assert!(self.rvalue_creates_operand(rvalue)); - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); bx } } } - pub fn codegen_rvalue_unsized(&mut self, - bx: Builder<'a, 'll, 'tcx>, - indirect_dest: PlaceRef<'ll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> - { + pub fn codegen_rvalue_unsized( + &mut self, + mut bx: Bx, + indirect_dest: PlaceRef<'tcx, Bx::Value>, + rvalue: &mir::Rvalue<'tcx>, + ) -> Bx { debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", indirect_dest.llval, rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); - cg_operand.val.store_unsized(&bx, indirect_dest); + let cg_operand = self.codegen_operand(&mut bx, operand); + cg_operand.val.store_unsized(&mut bx, indirect_dest); bx } @@ -198,29 +194,29 @@ pub fn codegen_rvalue_unsized(&mut self, } } - pub fn codegen_rvalue_operand(&mut self, - bx: Builder<'a, 'll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> (Builder<'a, 'll, 'tcx>, OperandRef<'ll, 'tcx>) - { + pub fn codegen_rvalue_operand( + &mut self, + mut bx: Bx, + rvalue: &mir::Rvalue<'tcx> + ) -> (Bx, OperandRef<'tcx, Bx::Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { - let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); debug!("cast operand is {:?}", operand); - let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty)); + let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::FnDef(def_id, substs) => { - if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") { + if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ const arguments"); } OperandValue::Immediate( - callee::resolve_and_get_fn(bx.cx, def_id, substs)) + callee::resolve_and_get_fn(bx.cx(), def_id, substs)) } _ => { bug!("{} cannot be reified to a fn ptr", operand.layout.ty) @@ -231,8 +227,8 @@ pub fn codegen_rvalue_operand(&mut self, match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = 
monomorphize::resolve_closure( - bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce); - OperandValue::Immediate(callee::get_fn(bx.cx, instance)) + bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce); + OperandValue::Immediate(bx.cx().get_fn(instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout.ty) @@ -244,7 +240,7 @@ pub fn codegen_rvalue_operand(&mut self, operand.val } mir::CastKind::Unsize => { - assert!(cast.is_llvm_scalar_pair()); + assert!(bx.cx().is_backend_scalar_pair(cast)); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a @@ -255,12 +251,12 @@ pub fn codegen_rvalue_operand(&mut self, // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + bx.cx().scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { // "standard" unsize - let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata, + let (lldata, llextra) = base::unsize_thin_ptr(&mut bx, lldata, operand.layout.ty, cast.ty); OperandValue::Pair(lldata, llextra) } @@ -270,16 +266,16 @@ pub fn codegen_rvalue_operand(&mut self, } } } - mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => { + mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if cast.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(cast) { let data_cast = bx.pointercast(data_ptr, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + bx.cx().scalar_pair_element_backend_type(cast, 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = cast.immediate_llvm_type(bx.cx); + let llcast_ty = bx.cx().immediate_backend_type(cast); let llval = bx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -288,25 +284,26 @@ pub fn codegen_rvalue_operand(&mut self, } } mir::CastKind::Misc => { - assert!(cast.is_llvm_immediate()); - let ll_t_out = cast.immediate_llvm_type(bx.cx); + assert!(bx.cx().is_backend_immediate(cast)); + let ll_t_out = bx.cx().immediate_backend_type(cast); if operand.layout.abi.is_uninhabited() { + let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); return (bx, OperandRef { - val: OperandValue::Immediate(C_undef(ll_t_out)), + val, layout: cast, }); } let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = operand.layout.immediate_llvm_type(bx.cx); + let ll_t_in = bx.cx().immediate_backend_type(operand.layout); match operand.layout.variants { layout::Variants::Single { index } => { if let Some(def) = operand.layout.ty.ty_adt_def() { let discr_val = def - .discriminant_for_variant(bx.cx.tcx, index) + .discriminant_for_variant(bx.cx().tcx(), index) .val; - let discr = C_uint_big(ll_t_out, discr_val); + let discr = bx.cx().const_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -327,18 +324,20 @@ pub fn codegen_rvalue_operand(&mut self, // then `i1 1` (i.e. E::B) is effectively `i8 -1`. 
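// The comment above is why the next line clears `signed` for bool-like
// scalars: sign-extending the one-bit tag would turn the second variant into
// -1, while the language requires zero-extension. Observable from safe code:
fn main() {
    enum E { A, B }
    assert_eq!(E::A as i8, 0);
    assert_eq!(E::B as i8, 1); // zero-extended, not sign-extended to -1
    assert_eq!(true as i8, 1);
}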
signed = !scalar.is_bool() && s; - let er = scalar.valid_range_exclusive(bx.cx); + let er = scalar.valid_range_exclusive(bx.cx()); if er.end != er.start && scalar.valid_range.end() > scalar.valid_range.start() { // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. - - base::call_assume(&bx, bx.icmp( - llvm::IntULE, + let ll_t_in_const = + bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end()); + let cmp = bx.icmp( + IntPredicate::IntULE, llval, - C_uint_big(ll_t_in, *scalar.valid_range.end()) - )); + ll_t_in_const + ); + base::call_assume(&mut bx, cmp); } } } @@ -348,8 +347,8 @@ pub fn codegen_rvalue_operand(&mut self, bx.intcast(llval, ll_t_out, signed) } (CastTy::Float, CastTy::Float) => { - let srcsz = ll_t_in.float_width(); - let dstsz = ll_t_out.float_width(); + let srcsz = bx.cx().float_width(ll_t_in); + let dstsz = bx.cx().float_width(ll_t_out); if dstsz > srcsz { bx.fpext(llval, ll_t_out) } else if srcsz > dstsz { @@ -366,15 +365,15 @@ pub fn codegen_rvalue_operand(&mut self, (CastTy::FnPtr, CastTy::Int(_)) => bx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => { - let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed); + let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed); bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => - cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out), + cast_int_to_float(&mut bx, signed, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(IntTy::I)) => - cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(_)) => - cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out), _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty) }; OperandValue::Immediate(newval) @@ -387,42 +386,42 @@ pub fn codegen_rvalue_operand(&mut self, } mir::Rvalue::Ref(_, bk, ref place) => { - let cg_place = self.codegen_place(&bx, place); + let cg_place = self.codegen_place(&mut bx, place); let ty = cg_place.layout.ty; // Note: places are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let val = if !bx.cx.type_has_metadata(ty) { + let val = if !bx.cx().type_has_metadata(ty) { OperandValue::Immediate(cg_place.llval) } else { OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) }; (bx, OperandRef { val, - layout: self.cx.layout_of(self.cx.tcx.mk_ref( - self.cx.tcx.types.re_erased, + layout: self.cx.layout_of(self.cx.tcx().mk_ref( + self.cx.tcx().types.re_erased, ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() } )), }) } mir::Rvalue::Len(ref place) => { - let size = self.evaluate_array_len(&bx, place); + let size = self.evaluate_array_len(&mut bx, place); let operand = OperandRef { val: OperandValue::Immediate(size), - layout: bx.cx.layout_of(bx.tcx().types.usize), + layout: bx.cx().layout_of(bx.tcx().types.usize), }; (bx, operand) } mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); let llresult = match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { - self.codegen_fat_ptr_binop(&bx, op, + self.codegen_fat_ptr_binop(&mut bx, op, lhs_addr, lhs_extra, rhs_addr, rhs_extra, lhs.layout.ty) @@ -430,36 +429,36 @@ pub fn codegen_rvalue_operand(&mut self, (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => { - self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty) + self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty) } _ => bug!() }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - layout: bx.cx.layout_of( + layout: bx.cx().layout_of( op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bx, operand) } mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); - let result = self.codegen_scalar_checked_binop(&bx, op, + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); + let result = self.codegen_scalar_checked_binop(&mut bx, op, lhs.immediate(), rhs.immediate(), lhs.layout.ty); let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty); let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); let operand = OperandRef { val: result, - layout: bx.cx.layout_of(operand_ty) + layout: bx.cx().layout_of(operand_ty) }; (bx, operand) } mir::Rvalue::UnaryOp(op, ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); let lloperand = operand.immediate(); let is_float = operand.layout.ty.is_fp(); let llval = match op { @@ -478,8 +477,8 @@ pub fn codegen_rvalue_operand(&mut self, mir::Rvalue::Discriminant(ref place) => { let discr_ty = rvalue.ty(&*self.mir, bx.tcx()); - let discr = self.codegen_place(&bx, place) - .codegen_get_discr(&bx, discr_ty); + let discr = self.codegen_place(&mut bx, place) + .codegen_get_discr(&mut bx, discr_ty); (bx, OperandRef { val: OperandValue::Immediate(discr), layout: self.cx.layout_of(discr_ty) @@ -487,9 +486,9 @@ pub fn codegen_rvalue_operand(&mut self, } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { - assert!(bx.cx.type_is_sized(ty)); - let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes()); - let tcx = bx.tcx(); + assert!(bx.cx().type_is_sized(ty)); + let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes()); + let tcx = self.cx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), layout: 
self.cx.layout_of(tcx.types.usize), @@ -498,22 +497,23 @@ pub fn codegen_rvalue_operand(&mut self, mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let (size, align) = bx.cx.size_and_align_of(content_ty); - let llsize = C_usize(bx.cx, size.bytes()); - let llalign = C_usize(bx.cx, align.abi()); - let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = box_layout.llvm_type(bx.cx); + let (size, align) = bx.cx().layout_of(content_ty).size_and_align(); + let llsize = bx.cx().const_usize(size.bytes()); + let llalign = bx.cx().const_usize(align.abi()); + let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); + let llty_ptr = bx.cx().backend_type(box_layout); // Allocate space: let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); + bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bx.tcx(), def_id); - let r = callee::get_fn(bx.cx, instance); - let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr); + let r = bx.cx().get_fn(instance); + let call = bx.call(r, &[llsize, llalign], None); + let val = bx.pointercast(call, llty_ptr); let operand = OperandRef { val: OperandValue::Immediate(val), @@ -522,14 +522,14 @@ pub fn codegen_rvalue_operand(&mut self, (bx, operand) } mir::Rvalue::Use(ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); (bx, operand) } mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, self.cx.tcx()); (bx, OperandRef::new_zst(self.cx, self.cx.layout_of(self.monomorphize(&ty)))) } @@ -538,32 +538,32 @@ pub fn codegen_rvalue_operand(&mut self, fn evaluate_array_len( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, place: &mir::Place<'tcx>, - ) -> &'ll Value { + ) -> Bx::Value { // ZST are passed as operands and require special handling // because codegen_place() panics if Local is operand. 
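// Three of the arms above reduce to constants or a single call: Rvalue::Ref
// yields one Immediate for thin references and a Pair for references that
// carry metadata; NullaryOp::SizeOf folds to the layout's size; and
// NullaryOp::Box becomes a call to the exchange_malloc lang item with the
// content's size and alignment. The numbers involved are the ordinary ones:
fn main() {
    use std::mem::{align_of, size_of};
    assert_eq!(size_of::<&u64>(), size_of::<usize>());       // thin ref: Immediate
    assert_eq!(size_of::<&[u64]>(), 2 * size_of::<usize>()); // fat ref: Pair (ptr, len)
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // Pair (ptr, vtable)
    assert_eq!(size_of::<(u8, u32)>(), 8);  // what SizeOf would fold to
    assert_eq!(align_of::<(u8, u32)>(), 4); // the llalign argument for the Box allocation
}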
if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { - let n = n.unwrap_usize(bx.cx.tcx); - return common::C_usize(bx.cx, n); + let n = n.unwrap_usize(bx.cx().tcx()); + return bx.cx().const_usize(n); } } } // use common size calculation for non zero-sized types - let cg_value = self.codegen_place(&bx, place); - return cg_value.len(bx.cx); + let cg_value = self.codegen_place(bx, place); + return cg_value.len(bx.cx()); } pub fn codegen_scalar_binop( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, op: mir::BinOp, - lhs: &'ll Value, - rhs: &'ll Value, + lhs: Bx::Value, + rhs: Bx::Value, input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Bx::Value { let is_float = input_ty.is_fp(); let is_signed = input_ty.is_signed(); let is_unit = input_ty.is_unit(); @@ -605,7 +605,7 @@ pub fn codegen_scalar_binop( mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - C_bool(bx.cx, match op { + bx.cx().const_bool(match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -626,45 +626,40 @@ pub fn codegen_scalar_binop( pub fn codegen_fat_ptr_binop( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, op: mir::BinOp, - lhs_addr: &'ll Value, - lhs_extra: &'ll Value, - rhs_addr: &'ll Value, - rhs_extra: &'ll Value, + lhs_addr: Bx::Value, + lhs_extra: Bx::Value, + rhs_addr: Bx::Value, + rhs_extra: Bx::Value, _input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Bx::Value { match op { mir::BinOp::Eq => { - bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), - bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra); + bx.and(lhs, rhs) } mir::BinOp::Ne => { - bx.or( - bx.icmp(llvm::IntNE, lhs_addr, rhs_addr), - bx.icmp(llvm::IntNE, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra); + bx.or(lhs, rhs) } mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => { // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1) let (op, strict_op) = match op { - mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT), - mir::BinOp::Le => (llvm::IntULE, llvm::IntULT), - mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT), - mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT), + mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT), + mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT), + mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT), + mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT), _ => bug!(), }; - - bx.or( - bx.icmp(strict_op, lhs_addr, rhs_addr), - bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), - bx.icmp(op, lhs_extra, rhs_extra) - ) - ) + let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr); + let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let and_rhs = bx.icmp(op, lhs_extra, rhs_extra); + let rhs = bx.and(and_lhs, and_rhs); + bx.or(lhs, rhs) } _ => { bug!("unexpected fat ptr binop"); @@ -672,19 +667,21 @@ pub fn codegen_fat_ptr_binop( } } - pub fn codegen_scalar_checked_binop(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - op: mir::BinOp, - lhs: &'ll Value, - rhs: &'ll Value, - input_ty: Ty<'tcx>) -> OperandValue<'ll> { + pub fn 
codegen_scalar_checked_binop( + &mut self, + bx: &mut Bx, + op: mir::BinOp, + lhs: Bx::Value, + rhs: Bx::Value, + input_ty: Ty<'tcx> + ) -> OperandValue { // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bx.cx.check_overflow { + if !bx.cx().check_overflow() { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, C_bool(bx.cx, false)); + return OperandValue::Pair(val, bx.cx().const_bool(false)); } let (val, of) = match op { @@ -703,12 +700,12 @@ pub fn codegen_scalar_checked_binop(&mut self, bx.extract_value(res, 1)) } mir::BinOp::Shl | mir::BinOp::Shr => { - let lhs_llty = val_ty(lhs); - let rhs_llty = val_ty(rhs); - let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); + let lhs_llty = bx.cx().val_ty(lhs); + let rhs_llty = bx.cx().val_ty(rhs); + let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -720,7 +717,9 @@ pub fn codegen_scalar_checked_binop(&mut self, OperandValue::Pair(val, of) } +} +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { match *rvalue { mir::Rvalue::Ref(..) | @@ -735,7 +734,7 @@ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { true, mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => { - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, self.cx.tcx()); let ty = self.monomorphize(&ty); self.cx.layout_of(ty).is_zst() } @@ -750,7 +749,11 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value { +fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + oop: OverflowOp, + bx: &mut Bx, + ty: Ty +) -> Bx::Value { use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{Int, Uint}; @@ -812,18 +815,22 @@ fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> }, }; - bx.cx.get_intrinsic(&name) + bx.cx().get_intrinsic(&name) } -fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, - signed: bool, - x: &'ll Value, - int_ty: &'ll Type, - float_ty: &'ll Type) -> &'ll Value { +fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + signed: bool, + x: Bx::Value, + int_ty: Bx::Type, + float_ty: Bx::Type +) -> Bx::Value { // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. - let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32; + let is_u128_to_f32 = !signed && + bx.cx().int_width(int_ty) == 128 && + bx.cx().float_width(float_ty) == 32; if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. 
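As a sanity check on the comment above, the threshold used in the hunk that follows can be reproduced with plain integer arithmetic. This is a standalone sketch, not part of the patch; it relies on the defined `as`-cast semantics of current Rust rather than on raw `uitofp`:

    // Reproduce MAX_F32_PLUS_HALF_ULP with plain integer arithmetic.
    // f32 (ieee::Single) has PRECISION = 24 and MAX_EXP = 127.
    fn main() {
        const PRECISION: u32 = 24;
        const MAX_EXP: u32 = 127;
        let max_f32_plus_half_ulp: u128 = ((1u128 << (PRECISION + 1)) - 1) << (MAX_EXP - PRECISION);
        // Just below the threshold the value still rounds to a finite f32 ...
        assert_eq!((max_f32_plus_half_ulp - 1) as f32, f32::MAX);
        // ... while at or above it the conversion must yield +infinity, which is
        // exactly the case the select over `uitofp` below takes care of.
        assert_eq!(max_f32_plus_half_ulp as f32, f32::INFINITY);
    }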
@@ -831,11 +838,12 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); - let overflow = bx.icmp(llvm::IntUGE, x, max); - let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); - let infinity = consts::bitcast(infinity_bits, float_ty); - bx.select(overflow, infinity, bx.uitofp(x, float_ty)) + let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let overflow = bx.icmp(IntPredicate::IntUGE, x, max); + let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); + let infinity = bx.bitcast(infinity_bits, float_ty); + let fp = bx.uitofp(x, float_ty); + bx.select(overflow, infinity, fp) } else { if signed { bx.sitofp(x, float_ty) @@ -845,20 +853,25 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, } } -fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, - signed: bool, - x: &'ll Value, - float_ty: &'ll Type, - int_ty: &'ll Type) -> &'ll Value { +fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( + bx: &mut Bx, + signed: bool, + x: Bx::Value, + float_ty: Bx::Type, + int_ty: Bx::Type +) -> Bx::Value { let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) }; - if !bx.sess().opts.debugging_opts.saturating_float_casts { + if !bx.cx().sess().opts.debugging_opts.saturating_float_casts { return fptosui_result; } + + let int_width = bx.cx().int_width(int_ty); + let float_width = bx.cx().float_width(float_ty); // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the // destination integer type after rounding towards zero. This `undef` value can cause UB in // safe code (see issue #10184), so we implement a saturating conversion on top of it: @@ -878,39 +891,50 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. 
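A quick standalone illustration (not part of the patch) of why the clamp bounds below are computed with `Round::TowardZero` rather than round-to-nearest, using the i32/f32 pair as an example:

    fn main() {
        // i32::MAX is not representable in f32; a round-to-nearest conversion goes
        // *up* and leaves the i32 range entirely:
        assert_eq!(i32::MAX as f32, 2_147_483_648.0);
        // Rounding towards zero instead yields the largest f32 that still fits,
        // which is what the clamp bound has to be:
        let f_max = 2_147_483_520.0_f32;
        assert!(f_max as f64 <= i32::MAX as f64);
        assert_eq!(f_max as i32, 2_147_483_520);
    }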
- fn compute_clamp_bounds(signed: bool, int_ty: &Type) -> (u128, u128) { - let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero); - assert_eq!(rounded_min.status, Status::OK); - let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero); - assert!(rounded_max.value.is_finite()); - (rounded_min.value.to_bits(), rounded_max.value.to_bits()) - } - fn int_max(signed: bool, int_ty: &Type) -> u128 { - let shift_amount = 128 - int_ty.int_width(); + let int_max = |signed: bool, int_width: u64| -> u128 { + let shift_amount = 128 - int_width; if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } - } - fn int_min(signed: bool, int_ty: &Type) -> i128 { + }; + let int_min = |signed: bool, int_width: u64| -> i128 { if signed { - i128::MIN >> (128 - int_ty.int_width()) + i128::MIN >> (128 - int_width) } else { 0 } - } - let float_bits_to_llval = |bits| { - let bits_llval = match float_ty.float_width() { - 32 => C_u32(bx.cx, bits as u32), - 64 => C_u64(bx.cx, bits as u64), + }; + + let compute_clamp_bounds_single = + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + let compute_clamp_bounds_double = + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + + let mut float_bits_to_llval = |bits| { + let bits_llval = match float_width { + 32 => bx.cx().const_u32(bits as u32), + 64 => bx.cx().const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; - consts::bitcast(bits_llval, float_ty) + bx.bitcast(bits_llval, float_ty) }; - let (f_min, f_max) = match float_ty.float_width() { - 32 => compute_clamp_bounds::(signed, int_ty), - 64 => compute_clamp_bounds::(signed, int_ty), + let (f_min, f_max) = match float_width { + 32 => compute_clamp_bounds_single(signed, int_width), + 64 => compute_clamp_bounds_double(signed, int_width), n => bug!("unsupported float width {}", n), }; let f_min = float_bits_to_llval(f_min); @@ -956,10 +980,10 @@ fn int_min(signed: bool, int_ty: &Type) -> i128 { // negation, and the negation can be merged into the select. Therefore, it not necessarily any // more expensive than a ordered ("normal") comparison. Whether these optimizations will be // performed is ultimately up to the backend, but at least x86 does perform them. 
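For readers following the argument, the clamp-and-select chain emitted just below can be written as ordinary Rust for the concrete f32 -> i32 case. This is an illustrative sketch only: the constants mirror what compute_clamp_bounds_single produces for i32, and a plain `as` cast stands in for the raw fpto[su]i result.

    fn saturating_f32_to_i32(x: f32) -> i32 {
        const F_MIN: f32 = -2147483648.0; // i32::MIN, exactly representable
        const F_MAX: f32 = 2147483520.0;  // largest f32 <= i32::MAX (rounded towards zero)
        let fptosui_result = x as i32;    // stand-in for the raw fpto[su]i value
        // s0 = select(x < f_min or x is NaN, int_min, fptosui_result)
        let s0 = if x.is_nan() || x < F_MIN { i32::MIN } else { fptosui_result };
        // s1 = select(x > f_max, int_max, s0)
        let s1 = if x > F_MAX { i32::MAX } else { s0 };
        // signed types additionally map NaN to zero via the (x == x) comparison
        if x == x { s1 } else { 0 }
    }

    fn main() {
        assert_eq!(saturating_f32_to_i32(1.0e10), i32::MAX);
        assert_eq!(saturating_f32_to_i32(-1.0e10), i32::MIN);
        assert_eq!(saturating_f32_to_i32(f32::NAN), 0);
        assert_eq!(saturating_f32_to_i32(-1.5), -1);
    }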
- let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min); - let greater = bx.fcmp(llvm::RealOGT, x, f_max); - let int_max = C_uint_big(int_ty, int_max(signed, int_ty)); - let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128); + let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); + let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); + let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width)); + let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -968,7 +992,9 @@ fn int_min(signed: bool, int_ty: &Type) -> i128 { // Therefore we only need to execute this step for signed integer types. if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0)) + let zero = bx.cx().const_uint(int_ty, 0); + let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x); + bx.select(cmp, s1, zero) } else { s1 } diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs similarity index 78% rename from src/librustc_codegen_llvm/mir/statement.rs rename to src/librustc_codegen_ssa/mir/statement.rs index 8bda2c98594e500e7b3844d7648c56f4a22e016a..a69474142ab58b98d945df9763b57af83dc126e2 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_ssa/mir/statement.rs @@ -10,21 +10,21 @@ use rustc::mir; -use asm; -use builder::Builder; - +use traits::BuilderMethods; use super::FunctionCx; use super::LocalRef; use super::OperandValue; +use traits::*; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_statement(&mut self, - bx: Builder<'a, 'll, 'tcx>, - statement: &mir::Statement<'tcx>) - -> Builder<'a, 'll, 'tcx> { +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_statement( + &mut self, + mut bx: Bx, + statement: &mir::Statement<'tcx> + ) -> Bx { debug!("codegen_statement(statement={:?})", statement); - self.set_debug_loc(&bx, statement.source_info); + self.set_debug_loc(&mut bx, statement.source_info); match statement.kind { mir::StatementKind::Assign(ref place, ref rvalue) => { if let mir::Place::Local(index) = *place { @@ -53,52 +53,52 @@ pub fn codegen_statement(&mut self, } } } else { - let cg_dest = self.codegen_place(&bx, place); + let cg_dest = self.codegen_place(&mut bx, place); self.codegen_rvalue(bx, cg_dest, rvalue) } } mir::StatementKind::SetDiscriminant{ref place, variant_index} => { - self.codegen_place(&bx, place) - .codegen_set_discr(&bx, variant_index); + self.codegen_place(&mut bx, place) + .codegen_set_discr(&mut bx, variant_index); bx } mir::StatementKind::StorageLive(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_live(&bx); + cg_place.storage_live(&mut bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_live(&bx); + cg_indirect_place.storage_live(&mut bx); } bx } mir::StatementKind::StorageDead(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_dead(&bx); + cg_place.storage_dead(&mut bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_dead(&bx); + cg_indirect_place.storage_dead(&mut bx); } bx } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { - self.codegen_place(&bx, output) + self.codegen_place(&mut bx, 
output) }).collect(); let input_vals = inputs.iter() .fold(Vec::with_capacity(inputs.len()), |mut acc, (span, input)| { - let op = self.codegen_operand(&bx, input); + let op = self.codegen_operand(&mut bx, input); if let OperandValue::Immediate(_) = op.val { acc.push(op.immediate()); } else { - span_err!(bx.sess(), span.to_owned(), E0669, + span_err!(bx.cx().sess(), span.to_owned(), E0669, "invalid value for constraint in inline assembly"); } acc }); if input_vals.len() == inputs.len() { - let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals); + let res = bx.codegen_inline_asm(asm, outputs, input_vals); if !res { - span_err!(bx.sess(), statement.source_info.span, E0668, + span_err!(bx.cx().sess(), statement.source_info.span, E0668, "malformed inline assembly"); } } diff --git a/src/librustc_codegen_ssa/mono_item.rs b/src/librustc_codegen_ssa/mono_item.rs new file mode 100644 index 0000000000000000000000000000000000000000..53acb3e376c774d933c0d4ed4d67478e9dcdc6da --- /dev/null +++ b/src/librustc_codegen_ssa/mono_item.rs @@ -0,0 +1,117 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Walks the crate looking for items/impl-items/trait-items that have +//! either a `rustc_symbol_name` or `rustc_item_path` attribute and +//! generates an error giving, respectively, the symbol name or +//! item-path. This is used for unit testing the code that generates +//! paths etc in all kinds of annoying scenarios. + +use base; +use rustc::hir; +use rustc::hir::def::Def; +use rustc::mir::mono::{Linkage, Visibility}; +use rustc::ty::layout::HasTyCtxt; +use std::fmt; +use traits::*; + +pub use rustc::mir::mono::MonoItem; + +pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; + +pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { + fn define>(&self, cx: &'a Bx::CodegenCx) { + debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + + match *self.as_mono_item() { + MonoItem::Static(def_id) => { + let tcx = cx.tcx(); + let is_mutable = match tcx.describe_def(def_id) { + Some(Def::Static(_, is_mutable)) => is_mutable, + Some(other) => { + bug!("Expected Def::Static, found {:?}", other) + } + None => { + bug!("Expected Def::Static for {:?}, found nothing", def_id) + } + }; + cx.codegen_static(def_id, is_mutable); + } + MonoItem::GlobalAsm(node_id) => { + let item = cx.tcx().hir.expect_item(node_id); + if let hir::ItemKind::GlobalAsm(ref ga) = item.node { + cx.codegen_global_asm(ga); + } else { + span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") + } + } + MonoItem::Fn(instance) => { + base::codegen_instance::(&cx, instance); + } + } + + debug!("END IMPLEMENTING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + } + + fn predefine>( + &self, + cx: &'a Bx::CodegenCx, + linkage: Linkage, + visibility: Visibility + ) { + debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + + let symbol_name = self.symbol_name(cx.tcx()).as_str(); + + debug!("symbol {}", &symbol_name); + + match *self.as_mono_item() { + MonoItem::Static(def_id) 
=> { + cx.predefine_static(def_id, linkage, visibility, &symbol_name); + } + MonoItem::Fn(instance) => { + cx.predefine_fn(instance, linkage, visibility, &symbol_name); + } + MonoItem::GlobalAsm(..) => {} + } + + debug!("END PREDEFINING '{} ({})' in cgu {}", + self.to_string(cx.tcx()), + self.to_raw_string(), + cx.codegen_unit().name()); + } + + fn to_raw_string(&self) -> String { + match *self.as_mono_item() { + MonoItem::Fn(instance) => { + format!("Fn({:?}, {})", + instance.def, + instance.substs.as_ptr() as usize) + } + MonoItem::Static(id) => { + format!("Static({:?})", id) + } + MonoItem::GlobalAsm(id) => { + format!("GlobalAsm({:?})", id) + } + } + } +} + +impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} diff --git a/src/librustc_codegen_ssa/traits/abi.rs b/src/librustc_codegen_ssa/traits/abi.rs new file mode 100644 index 0000000000000000000000000000000000000000..f35eb84813f753a7777b199346e9bec81951b04f --- /dev/null +++ b/src/librustc_codegen_ssa/traits/abi.rs @@ -0,0 +1,23 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::HasCodegen; +use rustc::ty::{FnSig, Instance, Ty}; +use rustc_target::abi::call::FnType; + +pub trait AbiMethods<'tcx> { + fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>; + fn new_vtable(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>; + fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>; +} + +pub trait AbiBuilderMethods<'tcx>: HasCodegen<'tcx> { + fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value); +} diff --git a/src/librustc_codegen_ssa/traits/asm.rs b/src/librustc_codegen_ssa/traits/asm.rs new file mode 100644 index 0000000000000000000000000000000000000000..93e4869e93733e35228f4dd2e7a965aeff1f6f54 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/asm.rs @@ -0,0 +1,28 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::Backend; +use super::HasCodegen; +use mir::place::PlaceRef; +use rustc::hir::{GlobalAsm, InlineAsm}; + +pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> { + // Take an inline assembly expression and splat it out via LLVM + fn codegen_inline_asm( + &mut self, + ia: &InlineAsm, + outputs: Vec>, + inputs: Vec, + ) -> bool; +} + +pub trait AsmMethods<'tcx>: Backend<'tcx> { + fn codegen_global_asm(&self, ga: &GlobalAsm); +} diff --git a/src/librustc_codegen_ssa/traits/backend.rs b/src/librustc_codegen_ssa/traits/backend.rs new file mode 100644 index 0000000000000000000000000000000000000000..b4d376cf5f0e29792700ebbac3bf56329e6eb13f --- /dev/null +++ b/src/librustc_codegen_ssa/traits/backend.rs @@ -0,0 +1,66 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. 
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::ty::layout::{HasTyCtxt, LayoutOf, TyLayout};
+use rustc::ty::Ty;
+
+use super::write::WriteBackendMethods;
+use super::CodegenObject;
+use rustc::middle::allocator::AllocatorKind;
+use rustc::middle::cstore::EncodedMetadata;
+use rustc::mir::mono::Stats;
+use rustc::session::Session;
+use rustc::ty::TyCtxt;
+use rustc_codegen_utils::codegen_backend::CodegenBackend;
+use std::sync::Arc;
+use syntax_pos::symbol::InternedString;
+
+pub trait BackendTypes {
+    type Value: CodegenObject;
+    type BasicBlock: Copy;
+    type Type: CodegenObject;
+    type Context;
+    type Funclet;
+
+    type DIScope: Copy;
+}
+
+pub trait Backend<'tcx>:
+    Sized + BackendTypes + HasTyCtxt<'tcx> + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
+{
+}
+
+impl<'tcx, T> Backend<'tcx> for T where
+    Self: BackendTypes + HasTyCtxt<'tcx> + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
+{}
+
+pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send {
+    fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Module;
+    fn write_metadata<'b, 'gcx>(
+        &self,
+        tcx: TyCtxt<'b, 'gcx, 'gcx>,
+        metadata: &Self::Module,
+    ) -> EncodedMetadata;
+    fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Module, kind: AllocatorKind);
+    fn compile_codegen_unit<'a, 'tcx: 'a>(
+        &self,
+        tcx: TyCtxt<'a, 'tcx, 'tcx>,
+        cgu_name: InternedString,
+    ) -> Stats;
+    // If find_features is true this won't access `sess.crate_types` by assuming
+    // that `is_pie_binary` is false. When we discover LLVM target features
+    // `sess.crate_types` is uninitialized so we cannot access it.
+    fn target_machine_factory(
+        &self,
+        sess: &Session,
+        find_features: bool,
+    ) -> Arc<dyn Fn() -> Result<Self::TargetMachine, String> + Send + Sync>;
+    fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
+}
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
new file mode 100644
index 0000000000000000000000000000000000000000..3757c514d2ce96f2684dc37703162c274b729726
--- /dev/null
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -0,0 +1,323 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+ +use super::abi::AbiBuilderMethods; +use super::asm::AsmBuilderMethods; +use super::debuginfo::DebugInfoBuilderMethods; +use super::intrinsic::IntrinsicCallMethods; +use super::type_::ArgTypeMethods; +use super::HasCodegen; +use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope}; +use std::ffi::CStr; +use mir::operand::OperandRef; +use mir::place::PlaceRef; +use rustc::ty::layout::{Align, Size}; +use MemFlags; + +use std::borrow::Cow; +use std::ops::Range; +use syntax::ast::AsmDialect; + +pub trait BuilderMethods<'a, 'tcx: 'a>: + HasCodegen<'tcx> + + DebugInfoBuilderMethods<'tcx> + + ArgTypeMethods<'tcx> + + AbiBuilderMethods<'tcx> + + IntrinsicCallMethods<'tcx> + + AsmBuilderMethods<'tcx> +{ + fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self; + fn with_cx(cx: &'a Self::CodegenCx) -> Self; + fn build_sibling_block<'b>(&self, name: &'b str) -> Self; + fn cx(&self) -> &Self::CodegenCx; + fn llfn(&self) -> Self::Value; + fn llbb(&self) -> Self::BasicBlock; + fn count_insn(&self, category: &str); + + fn set_value_name(&mut self, value: Self::Value, name: &str); + fn position_at_end(&mut self, llbb: Self::BasicBlock); + fn position_at_start(&mut self, llbb: Self::BasicBlock); + fn ret_void(&mut self); + fn ret(&mut self, v: Self::Value); + fn br(&mut self, dest: Self::BasicBlock); + fn cond_br( + &mut self, + cond: Self::Value, + then_llbb: Self::BasicBlock, + else_llbb: Self::BasicBlock, + ); + fn switch( + &mut self, + v: Self::Value, + else_llbb: Self::BasicBlock, + num_cases: usize, + ) -> Self::Value; + fn invoke( + &mut self, + llfn: Self::Value, + args: &[Self::Value], + then: Self::BasicBlock, + catch: Self::BasicBlock, + funclet: Option<&Self::Funclet>, + ) -> Self::Value; + fn unreachable(&mut self); + fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn or(&mut self, lhs: 
Self::Value, rhs: Self::Value) -> Self::Value; + fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn neg(&mut self, v: Self::Value) -> Self::Value; + fn fneg(&mut self, v: Self::Value) -> Self::Value; + fn not(&mut self, v: Self::Value) -> Self::Value; + + fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn array_alloca( + &mut self, + ty: Self::Type, + len: Self::Value, + name: &str, + align: Align, + ) -> Self::Value; + + fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value; + fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value; + fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value; + fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>) + -> OperandRef<'tcx, Self::Value>; + + fn range_metadata(&mut self, load: Self::Value, range: Range); + fn nonnull_metadata(&mut self, load: Self::Value); + + fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; + fn store_with_flags( + &mut self, + val: Self::Value, + ptr: Self::Value, + align: Align, + flags: MemFlags, + ) -> Self::Value; + fn atomic_store( + &mut self, + val: Self::Value, + ptr: Self::Value, + order: AtomicOrdering, + size: Size, + ); + + fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value; + fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value; + + fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value; + fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + + fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + + fn empty_phi(&mut self, ty: Self::Type) -> Self::Value; + fn phi( + &mut self, + ty: Self::Type, + vals: &[Self::Value], + bbs: &[Self::BasicBlock], + ) -> Self::Value; + fn inline_asm_call( + &mut self, + asm: &CStr, + cons: &CStr, + inputs: &[Self::Value], + output: Self::Type, + volatile: bool, + alignstack: bool, + dia: AsmDialect, + ) -> Option; + + fn memcpy( + &mut self, + dst: Self::Value, + dst_align: Align, + src: Self::Value, + src_align: Align, + size: Self::Value, + flags: MemFlags, + ); + fn memmove( + &mut self, + dst: Self::Value, + dst_align: Align, + src: Self::Value, + src_align: Align, + size: Self::Value, + flags: MemFlags, + ); + fn memset( + &mut self, + ptr: Self::Value, + fill_byte: 
Self::Value, + size: Self::Value, + align: Align, + flags: MemFlags, + ); + + fn minnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn maxnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; + fn select( + &mut self, + cond: Self::Value, + then_val: Self::Value, + else_val: Self::Value, + ) -> Self::Value; + + fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value; + fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value; + fn insert_element( + &mut self, + vec: Self::Value, + elt: Self::Value, + idx: Self::Value, + ) -> Self::Value; + fn shuffle_vector( + &mut self, + v1: Self::Value, + v2: Self::Value, + mask: Self::Value, + ) -> Self::Value; + fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value; + fn vector_reduce_fadd_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; + fn vector_reduce_fmul_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; + fn vector_reduce_add(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_mul(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_and(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_or(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_xor(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmin(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmax(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmin_fast(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_fmax_fast(&mut self, src: Self::Value) -> Self::Value; + fn vector_reduce_min(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; + fn vector_reduce_max(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; + fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; + fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value; + + fn landing_pad( + &mut self, + ty: Self::Type, + pers_fn: Self::Value, + num_clauses: usize, + ) -> Self::Value; + fn add_clause(&mut self, landing_pad: Self::Value, clause: Self::Value); + fn set_cleanup(&mut self, landing_pad: Self::Value); + fn resume(&mut self, exn: Self::Value) -> Self::Value; + fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; + fn cleanup_ret( + &mut self, + funclet: &Self::Funclet, + unwind: Option, + ) -> Self::Value; + fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; + fn catch_ret(&mut self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value; + fn catch_switch( + &mut self, + parent: Option, + unwind: Option, + num_handlers: usize, + ) -> Self::Value; + fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); + fn set_personality_fn(&mut self, personality: Self::Value); + + fn atomic_cmpxchg( + &mut self, + dst: Self::Value, + cmp: Self::Value, + src: Self::Value, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: bool, + ) -> Self::Value; + fn atomic_rmw( + &mut self, + op: AtomicRmwBinOp, + dst: Self::Value, + src: Self::Value, + order: AtomicOrdering, + ) -> Self::Value; + fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope); + fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); + fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); + fn set_invariant_load(&mut self, load: Self::Value); + + /// Returns the ptr 
value that should be used for storing `val`. + fn check_store(&mut self, val: Self::Value, ptr: Self::Value) -> Self::Value; + + /// Returns the args that should be used for a call to `llfn`. + fn check_call<'b>( + &mut self, + typ: &str, + llfn: Self::Value, + args: &'b [Self::Value], + ) -> Cow<'b, [Self::Value]> + where + [Self::Value]: ToOwned; + fn lifetime_start(&mut self, ptr: Self::Value, size: Size); + fn lifetime_end(&mut self, ptr: Self::Value, size: Size); + + /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations + /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` + /// and the intrinsic for `lt` and passes them to `emit`, which is in + /// charge of generating code to call the passed intrinsic on whatever + /// block of generated code is targeted for the intrinsic. + /// + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations + /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). + fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: Self::Value, size: Size); + + fn call( + &mut self, + llfn: Self::Value, + args: &[Self::Value], + funclet: Option<&Self::Funclet>, + ) -> Self::Value; + fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value; + + unsafe fn delete_basic_block(&mut self, bb: Self::BasicBlock); + fn do_not_inline(&mut self, llret: Self::Value); +} diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs new file mode 100644 index 0000000000000000000000000000000000000000..c0a5445219565cb86e7fab49ec48be0ae175a41d --- /dev/null +++ b/src/librustc_codegen_ssa/traits/consts.rs @@ -0,0 +1,64 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::Backend; +use mir::place::PlaceRef; +use rustc::mir::interpret::Allocation; +use rustc::mir::interpret::Scalar; +use rustc::ty::layout; +use syntax::symbol::LocalInternedString; + +pub trait ConstMethods<'tcx>: Backend<'tcx> { + // Constant constructors + + fn const_null(&self, t: Self::Type) -> Self::Value; + fn const_undef(&self, t: Self::Type) -> Self::Value; + fn const_int(&self, t: Self::Type, i: i64) -> Self::Value; + fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value; + fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value; + fn const_bool(&self, val: bool) -> Self::Value; + fn const_i32(&self, i: i32) -> Self::Value; + fn const_u32(&self, i: u32) -> Self::Value; + fn const_u64(&self, i: u64) -> Self::Value; + fn const_usize(&self, i: u64) -> Self::Value; + fn const_u8(&self, i: u8) -> Self::Value; + + // This is a 'c-like' raw string, which differs from + // our boxed-and-length-annotated strings. 
+ fn const_cstr(&self, s: LocalInternedString, null_terminated: bool) -> Self::Value; + + fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; + fn const_fat_ptr(&self, ptr: Self::Value, meta: Self::Value) -> Self::Value; + fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value; + fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; + fn const_vector(&self, elts: &[Self::Value]) -> Self::Value; + fn const_bytes(&self, bytes: &[u8]) -> Self::Value; + + fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; + fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; + fn const_to_uint(&self, v: Self::Value) -> u64; + fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; + + fn is_const_integral(&self, v: Self::Value) -> bool; + fn is_const_real(&self, v: Self::Value) -> bool; + + fn scalar_to_backend( + &self, + cv: Scalar, + layout: &layout::Scalar, + llty: Self::Type, + ) -> Self::Value; + fn from_const_alloc( + &self, + layout: layout::TyLayout<'tcx>, + alloc: &Allocation, + offset: layout::Size, + ) -> PlaceRef<'tcx, Self::Value>; +} diff --git a/src/librustc_codegen_ssa/traits/debuginfo.rs b/src/librustc_codegen_ssa/traits/debuginfo.rs new file mode 100644 index 0000000000000000000000000000000000000000..643776fcd64f4ee0bb416dec17606e38d0f89d7e --- /dev/null +++ b/src/librustc_codegen_ssa/traits/debuginfo.rs @@ -0,0 +1,72 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::Backend; +use super::HasCodegen; +use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind}; +use rustc::hir::def_id::CrateNum; +use rustc::mir; +use rustc::ty::{self, Ty}; +use rustc_data_structures::indexed_vec::IndexVec; +use rustc_mir::monomorphize::Instance; +use syntax::ast::Name; +use syntax_pos::{SourceFile, Span}; + +pub trait DebugInfoMethods<'tcx>: Backend<'tcx> { + fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value); + + /// Creates the function-specific debug context. + /// + /// Returns the FunctionDebugContext for the function which holds state needed + /// for debug info creation. The function may also return another variant of the + /// FunctionDebugContext enum which indicates why no debuginfo should be created + /// for the function. 
+ fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, + llfn: Self::Value, + mir: &mir::Mir, + ) -> FunctionDebugContext; + + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext, + ) -> IndexVec>; + fn extend_scope_to_file( + &self, + scope_metadata: Self::DIScope, + file: &SourceFile, + defining_crate: CrateNum, + ) -> Self::DIScope; + fn debuginfo_finalize(&self); + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4]; +} + +pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> { + fn declare_local( + &mut self, + dbg_context: &FunctionDebugContext, + variable_name: Name, + variable_type: Ty<'tcx>, + scope_metadata: Self::DIScope, + variable_access: VariableAccess<'_, Self::Value>, + variable_kind: VariableKind, + span: Span, + ); + fn set_source_location( + &mut self, + debug_context: &FunctionDebugContext, + scope: Option, + span: Span, + ); + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self); +} diff --git a/src/librustc_codegen_ssa/traits/declare.rs b/src/librustc_codegen_ssa/traits/declare.rs new file mode 100644 index 0000000000000000000000000000000000000000..38ef52e3c8e001762e5b5e4b2562202114899d44 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/declare.rs @@ -0,0 +1,89 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::Backend; +use rustc::hir::def_id::DefId; +use rustc::mir::mono::{Linkage, Visibility}; +use rustc::ty; +use rustc_mir::monomorphize::Instance; + +pub trait DeclareMethods<'tcx>: Backend<'tcx> { + /// Declare a global value. + /// + /// If there’s a value with the same name already declared, the function will + /// return its Value instead. + fn declare_global(&self, name: &str, ty: Self::Type) -> Self::Value; + + /// Declare a C ABI function. + /// + /// Only use this for foreign function ABIs and glue. For Rust functions use + /// `declare_fn` instead. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + fn declare_cfn(&self, name: &str, fn_type: Self::Type) -> Self::Value; + + /// Declare a Rust function. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + fn declare_fn(&self, name: &str, sig: ty::PolyFnSig<'tcx>) -> Self::Value; + + /// Declare a global with an intention to define it. + /// + /// Use this function when you intend to define a global. This function will + /// return None if the name already has a definition associated with it. In that + /// case an error should be reported to the user, because it usually happens due + /// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). + fn define_global(&self, name: &str, ty: Self::Type) -> Option; + + /// Declare a private global + /// + /// Use this function when you intend to define a global without a name. + fn define_private_global(&self, ty: Self::Type) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. 
This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. + fn define_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// return panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. + fn define_internal_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value; + + /// Get declared value by name. + fn get_declared_value(&self, name: &str) -> Option; + + /// Get defined or externally defined (AvailableExternally linkage) value by + /// name. + fn get_defined_value(&self, name: &str) -> Option; +} + +pub trait PreDefineMethods<'tcx>: Backend<'tcx> { + fn predefine_static( + &self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str, + ); + fn predefine_fn( + &self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str, + ); +} diff --git a/src/librustc_codegen_ssa/traits/intrinsic.rs b/src/librustc_codegen_ssa/traits/intrinsic.rs new file mode 100644 index 0000000000000000000000000000000000000000..53a7878796b3112982d2a746adeb9e48122b84d7 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/intrinsic.rs @@ -0,0 +1,37 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::Backend; +use super::HasCodegen; +use mir::operand::OperandRef; +use rustc::ty::Ty; +use rustc_target::abi::call::FnType; +use syntax_pos::Span; + +pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> { + /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, + /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, + /// add them to librustc_codegen_llvm/context.rs + fn codegen_intrinsic_call( + &mut self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, Self::Value>], + llresult: Self::Value, + span: Span, + ); +} + +pub trait IntrinsicDeclarationMethods<'tcx>: Backend<'tcx> { + fn get_intrinsic(&self, key: &str) -> Self::Value; + + /// Declare any llvm intrinsics that you might need + fn declare_intrinsic(&self, key: &str) -> Option; +} diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs new file mode 100644 index 0000000000000000000000000000000000000000..0425b8e8e23b1dd0d849773ab5fa7f66fa4d69b5 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/misc.rs @@ -0,0 +1,41 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+
+use super::Backend;
+use libc::c_uint;
+use rustc::mir::mono::Stats;
+use rustc::session::Session;
+use rustc::ty::{self, Instance, Ty};
+use rustc::util::nodemap::FxHashMap;
+use rustc_mir::monomorphize::partitioning::CodegenUnit;
+use std::cell::RefCell;
+use std::sync::Arc;
+
+pub trait MiscMethods<'tcx>: Backend<'tcx> {
+    fn vtables(
+        &self,
+    ) -> &RefCell<FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), Self::Value>>;
+    fn check_overflow(&self) -> bool;
+    fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, Self::Value>>;
+    fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value;
+    fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value;
+    fn eh_personality(&self) -> Self::Value;
+    fn eh_unwind_resume(&self) -> Self::Value;
+    fn sess(&self) -> &Session;
+    fn stats(&self) -> &RefCell<Stats>;
+    fn consume_stats(self) -> RefCell<Stats>;
+    fn codegen_unit(&self) -> &Arc<CodegenUnit<'tcx>>;
+    fn statics_to_rauw(&self) -> &RefCell<Vec<(Self::Value, Self::Value)>>;
+    fn closure_env_needs_indirect_debuginfo(&self) -> bool;
+    fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+    fn set_frame_pointer_elimination(&self, llfn: Self::Value);
+    fn apply_target_cpu_attr(&self, llfn: Self::Value);
+    fn create_used_variable(&self);
+}
diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5cff31e17b5bc7abdd2f603c48ee2f6e717c665a
--- /dev/null
+++ b/src/librustc_codegen_ssa/traits/mod.rs
@@ -0,0 +1,99 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Interface of a Rust codegen backend
+//!
+//! This crate defines all the traits that have to be implemented by a codegen backend in order to
+//! use the backend-agnostic codegen code in `rustc_codegen_ssa`.
+//!
+//! The interface is designed around two backend-specific data structures, the codegen context and
+//! the builder. The codegen context is supposed to be read-only after its creation and during the
+//! actual codegen, while the builder stores the information about the function during codegen and
+//! is used to produce the instructions of the backend IR.
+//!
+//! Finally, a third `Backend` structure has to implement methods related to how codegen
+//! information is passed to the backend, especially for asynchronous compilation.
+//!
+//! The traits contain associated types that are backend-specific, such as the backend's value or
+//! basic blocks.
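To make the shape of this interface concrete, the context/builder split described in the module doc above can be mocked up in a few lines of standalone Rust. Everything here (the `Cx`, `Builder` and `Value` names) is invented for illustration and is far simpler than the real `CodegenCx`/`Builder` pair; it only shows the read-only-context / mutable-builder division of labour:

    use std::cell::Cell;

    #[derive(Copy, Clone, PartialEq, Debug)]
    struct Value(u32);

    // Read-only during codegen: only interior mutability, shared by reference.
    struct Cx {
        next_id: Cell<u32>,
    }

    // Mutable, per function: appends instructions to the IR being built.
    struct Builder<'a> {
        cx: &'a Cx,
        insns: Vec<String>,
    }

    impl Cx {
        fn fresh(&self) -> Value {
            let id = self.next_id.get();
            self.next_id.set(id + 1);
            Value(id)
        }
        fn const_u32(&self, n: u32) -> Value {
            let _ = n; // a real backend would intern the constant here
            self.fresh()
        }
    }

    impl<'a> Builder<'a> {
        fn cx(&self) -> &'a Cx {
            self.cx
        }
        fn add(&mut self, lhs: Value, rhs: Value) -> Value {
            let res = self.cx.fresh();
            self.insns.push(format!("{:?} = add {:?}, {:?}", res, lhs, rhs));
            res
        }
    }

    fn main() {
        let cx = Cx { next_id: Cell::new(0) };
        let mut bx = Builder { cx: &cx, insns: Vec::new() };
        let a = bx.cx().const_u32(1);
        let b = bx.cx().const_u32(2);
        let _sum = bx.add(a, b);
        println!("{}", bx.insns.join("\n"));
    }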
+ +mod abi; +mod asm; +mod backend; +mod builder; +mod consts; +mod debuginfo; +mod declare; +mod intrinsic; +mod misc; +mod statics; +mod type_; +mod write; + +pub use self::abi::{AbiBuilderMethods, AbiMethods}; +pub use self::asm::{AsmBuilderMethods, AsmMethods}; +pub use self::backend::{Backend, BackendTypes, ExtraBackendMethods}; +pub use self::builder::BuilderMethods; +pub use self::consts::ConstMethods; +pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods}; +pub use self::declare::{DeclareMethods, PreDefineMethods}; +pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; +pub use self::misc::MiscMethods; +pub use self::statics::StaticMethods; +pub use self::type_::{ + ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods, +}; +pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods}; + +use std::fmt; + +pub trait CodegenObject: Copy + PartialEq + fmt::Debug {} +impl CodegenObject for T {} + +pub trait CodegenMethods<'tcx>: + Backend<'tcx> + + TypeMethods<'tcx> + + MiscMethods<'tcx> + + ConstMethods<'tcx> + + StaticMethods<'tcx> + + DebugInfoMethods<'tcx> + + AbiMethods<'tcx> + + IntrinsicDeclarationMethods<'tcx> + + DeclareMethods<'tcx> + + AsmMethods<'tcx> + + PreDefineMethods<'tcx> +{ +} + +impl<'tcx, T> CodegenMethods<'tcx> for T where + Self: Backend<'tcx> + + TypeMethods<'tcx> + + MiscMethods<'tcx> + + ConstMethods<'tcx> + + StaticMethods<'tcx> + + DebugInfoMethods<'tcx> + + AbiMethods<'tcx> + + IntrinsicDeclarationMethods<'tcx> + + DeclareMethods<'tcx> + + AsmMethods<'tcx> + + PreDefineMethods<'tcx> +{} + +pub trait HasCodegen<'tcx>: Backend<'tcx> { + type CodegenCx: CodegenMethods<'tcx> + + BackendTypes< + Value = Self::Value, + BasicBlock = Self::BasicBlock, + Type = Self::Type, + Context = Self::Context, + Funclet = Self::Funclet, + DIScope = Self::DIScope, + >; +} diff --git a/src/librustc_codegen_ssa/traits/statics.rs b/src/librustc_codegen_ssa/traits/statics.rs new file mode 100644 index 0000000000000000000000000000000000000000..172c48f8a85ffcbd6f3a929806d6f358a0ddd58c --- /dev/null +++ b/src/librustc_codegen_ssa/traits/statics.rs @@ -0,0 +1,23 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::Backend; +use rustc::hir::def_id::DefId; +use rustc::ty::layout::Align; + +pub trait StaticMethods<'tcx>: Backend<'tcx> { + fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn static_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn static_addr_of_mut(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value; + fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value; + fn get_static(&self, def_id: DefId) -> Self::Value; + fn codegen_static(&self, def_id: DefId, is_mutable: bool); + unsafe fn static_replace_all_uses(&self, old_g: Self::Value, new_g: Self::Value); +} diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs new file mode 100644 index 0000000000000000000000000000000000000000..1aa1f45f51746a28599100d37b13535d38e99644 --- /dev/null +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -0,0 +1,204 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::misc::MiscMethods; +use super::Backend; +use super::HasCodegen; +use common::{self, TypeKind}; +use mir::place::PlaceRef; +use rustc::ty::layout::{self, Align, Size, TyLayout}; +use rustc::ty::{self, Ty}; +use rustc::util::nodemap::FxHashMap; +use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; +use std::cell::RefCell; +use syntax::ast; + +pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { + fn type_void(&self) -> Self::Type; + fn type_metadata(&self) -> Self::Type; + fn type_i1(&self) -> Self::Type; + fn type_i8(&self) -> Self::Type; + fn type_i16(&self) -> Self::Type; + fn type_i32(&self) -> Self::Type; + fn type_i64(&self) -> Self::Type; + fn type_i128(&self) -> Self::Type; + + // Creates an integer type with the given number of bits, e.g. i24 + fn type_ix(&self, num_bits: u64) -> Self::Type; + fn type_isize(&self) -> Self::Type; + + fn type_f32(&self) -> Self::Type; + fn type_f64(&self) -> Self::Type; + fn type_x86_mmx(&self) -> Self::Type; + + fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; + fn type_named_struct(&self, name: &str) -> Self::Type; + fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_kind(&self, ty: Self::Type) -> TypeKind; + fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool); + fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; + fn element_type(&self, ty: Self::Type) -> Self::Type; + + /// Return the number of elements in `self` if it is a LLVM vector type. + fn vector_length(&self, ty: Self::Type) -> usize; + + fn func_params_types(&self, ty: Self::Type) -> Vec; + fn float_width(&self, ty: Self::Type) -> usize; + + /// Retrieve the bit width of the integer type `self`. 
+ fn int_width(&self, ty: Self::Type) -> u64; + + fn val_ty(&self, v: Self::Value) -> Self::Type; + fn scalar_lltypes(&self) -> &RefCell, Self::Type>>; +} + +pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { + fn type_bool(&self) -> Self::Type { + self.type_i8() + } + + fn type_i8p(&self) -> Self::Type { + self.type_ptr_to(self.type_i8()) + } + + fn type_int(&self) -> Self::Type { + match &self.sess().target.target.target_c_int_width[..] { + "16" => self.type_i16(), + "32" => self.type_i32(), + "64" => self.type_i64(), + width => bug!("Unsupported target_c_int_width: {}", width), + } + } + + fn type_int_from_ty(&self, t: ast::IntTy) -> Self::Type { + match t { + ast::IntTy::Isize => self.type_isize(), + ast::IntTy::I8 => self.type_i8(), + ast::IntTy::I16 => self.type_i16(), + ast::IntTy::I32 => self.type_i32(), + ast::IntTy::I64 => self.type_i64(), + ast::IntTy::I128 => self.type_i128(), + } + } + + fn type_uint_from_ty(&self, t: ast::UintTy) -> Self::Type { + match t { + ast::UintTy::Usize => self.type_isize(), + ast::UintTy::U8 => self.type_i8(), + ast::UintTy::U16 => self.type_i16(), + ast::UintTy::U32 => self.type_i32(), + ast::UintTy::U64 => self.type_i64(), + ast::UintTy::U128 => self.type_i128(), + } + } + + fn type_float_from_ty(&self, t: ast::FloatTy) -> Self::Type { + match t { + ast::FloatTy::F32 => self.type_f32(), + ast::FloatTy::F64 => self.type_f64(), + } + } + + fn type_from_integer(&self, i: layout::Integer) -> Self::Type { + use rustc::ty::layout::Integer::*; + match i { + I8 => self.type_i8(), + I16 => self.type_i16(), + I32 => self.type_i32(), + I64 => self.type_i64(), + I128 => self.type_i128(), + } + } + + fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type { + // FIXME(eddyb) We could find a better approximation if ity.align < align. + let ity = layout::Integer::approximate_abi_align(self, align); + self.type_from_integer(ity) + } + + /// Return a LLVM type that has at most the required alignment, + /// and exactly the required size, as a best-effort padding array. + fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type { + let unit = layout::Integer::approximate_abi_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + common::type_needs_drop(self.tcx(), ty) + } + + fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + common::type_is_sized(self.tcx(), ty) + } + + fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { + common::type_is_freeze(self.tcx(), ty) + } + + fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { + use syntax_pos::DUMMY_SP; + if ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) { + return false; + } + + let tail = self.tcx().struct_tail(ty); + match tail.sty { + ty::Foreign(..) => false, + ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true,
+            _ => bug!("unexpected unsized tail: {:?}", tail.sty),
+        }
+    }
+}
+
+impl<T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {}
+
+pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
+    fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
+    fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
+    fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
+    fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
+    fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
+    fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
+    fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
+    fn is_backend_scalar_pair(&self, layout: TyLayout<'tcx>) -> bool;
+    fn backend_field_index(&self, layout: TyLayout<'tcx>, index: usize) -> u64;
+    fn scalar_pair_element_backend_type<'a>(
+        &self,
+        layout: TyLayout<'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> Self::Type;
+}
+
+pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> {
+    fn store_fn_arg(
+        &mut self,
+        ty: &ArgType<'tcx, Ty<'tcx>>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, Self::Value>,
+    );
+    fn store_arg_ty(
+        &mut self,
+        ty: &ArgType<'tcx, Ty<'tcx>>,
+        val: Self::Value,
+        dst: PlaceRef<'tcx, Self::Value>,
+    );
+    fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> Self::Type;
+}
+
+pub trait TypeMethods<'tcx>: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {}
+
+impl<T> TypeMethods<'tcx> for T where Self: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {}
diff --git a/src/librustc_codegen_ssa/traits/write.rs b/src/librustc_codegen_ssa/traits/write.rs
new file mode 100644
index 0000000000000000000000000000000000000000..72522e19af21083aa13dadf365ea53c6f486c073
--- /dev/null
+++ b/src/librustc_codegen_ssa/traits/write.rs
@@ -0,0 +1,72 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use back::write::{CodegenContext, ModuleConfig};
+use {CompiledModule, ModuleCodegen};
+
+use rustc::dep_graph::WorkProduct;
+use rustc::util::time_graph::Timeline;
+use rustc_errors::{FatalError, Handler};
+
+pub trait WriteBackendMethods: 'static + Sized + Clone {
+    type Module: Send + Sync;
+    type TargetMachine;
+    type ModuleBuffer: ModuleBufferMethods;
+    type Context: ?Sized;
+    type ThinData: Send + Sync;
+    type ThinBuffer: ThinBufferMethods;
+
+    /// Performs LTO, which in the case of full LTO means merging all modules into
+    /// a single one and returning it for further optimizing. For ThinLTO, it will
+    /// do the global analysis necessary and return two lists, one of the modules
+    /// that need optimization and another for modules that can simply be copied over
+    /// from the incr. comp. cache.
+    fn run_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+        timeline: &mut Timeline,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
+    fn print_pass_timings(&self);
+    unsafe fn optimize(
+        cgcx: &CodegenContext<Self>,
+        diag_handler: &Handler,
+        module: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+        timeline: &mut Timeline,
+    ) -> Result<(), FatalError>;
+    unsafe fn optimize_thin(
+        cgcx: &CodegenContext<Self>,
+        thin: &mut ThinModule<Self>,
+        timeline: &mut Timeline,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+    unsafe fn codegen(
+        cgcx: &CodegenContext<Self>,
+        diag_handler: &Handler,
+        module: ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+        timeline: &mut Timeline,
+    ) -> Result<CompiledModule, FatalError>;
+    fn run_lto_pass_manager(
+        cgcx: &CodegenContext<Self>,
+        llmod: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+        thin: bool,
+    );
+}
+
+pub trait ThinBufferMethods: Send + Sync {
+    fn data(&self) -> &[u8];
+}
+
+pub trait ModuleBufferMethods: Send + Sync {
+    fn data(&self) -> &[u8];
+}
diff --git a/src/librustc_codegen_utils/Cargo.toml b/src/librustc_codegen_utils/Cargo.toml
index 4c57e97841409a0d8a344eb797b823ce87177510..34a09f30b641162e1ed5541673832dd686935634 100644
--- a/src/librustc_codegen_utils/Cargo.toml
+++ b/src/librustc_codegen_utils/Cargo.toml
@@ -13,11 +13,9 @@ test = false
 flate2 = "1.0"
 log = "0.4"
-serialize = { path = "../libserialize" }
 syntax = { path = "../libsyntax" }
 syntax_pos = { path = "../libsyntax_pos" }
 rustc = { path = "../librustc" }
-rustc_allocator = { path = "../librustc_allocator" }
 rustc_target = { path = "../librustc_target" }
 rustc_data_structures = { path = "../librustc_data_structures" }
 rustc_metadata = { path = "../librustc_metadata" }
diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs
index f0ce1e9b0efab2021377b8f52f57b2e18f724b47..c3edbb633c72298c084c3d353d530eb6416ba068 100644
--- a/src/librustc_codegen_utils/lib.rs
+++ b/src/librustc_codegen_utils/lib.rs
@@ -30,10 +30,8 @@
 #[macro_use]
 extern crate log;
-extern crate serialize;
 #[macro_use]
 extern crate rustc;
-extern crate rustc_allocator;
 extern crate rustc_target;
 extern crate rustc_metadata;
 extern crate rustc_mir;
@@ -42,16 +40,10 @@ extern crate syntax_pos;
 #[macro_use]
 extern crate rustc_data_structures;
-use std::path::PathBuf;
-
-use rustc::session::Session;
 use rustc::ty::TyCtxt;
-pub mod command;
 pub mod link;
-pub mod linker;
 pub mod codegen_backend;
-pub mod symbol_export;
 pub mod symbol_names;
 pub mod symbol_names_test;
@@ -69,43 +61,4 @@ pub fn check_for_rustc_errors_attr(tcx: TyCtxt) {
     }
 }
-#[derive(Copy, Clone, Debug, PartialEq)]
-pub enum ModuleKind {
-    Regular,
-    Metadata,
-    Allocator,
-}
-
-#[derive(Debug)]
-pub struct CompiledModule {
-    pub name: String,
-    pub kind: ModuleKind,
-    pub object: Option<PathBuf>,
-    pub bytecode: Option<PathBuf>,
-    pub bytecode_compressed: Option<PathBuf>,
-}
-
-pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session)
-                    -> PathBuf {
-    // On Windows, static libraries sometimes show up as libfoo.a and other
-    // times show up as foo.lib
-    let oslibname = format!("{}{}{}",
-                            sess.target.target.options.staticlib_prefix,
-                            name,
-                            sess.target.target.options.staticlib_suffix);
-    let unixlibname = format!("lib{}.a", name);
-
-    for path in search_paths {
-        debug!("looking for {} inside {:?}", name, path);
-        let test = path.join(&oslibname);
-        if test.exists() { return test }
-        if oslibname != unixlibname {
-            let test = path.join(&unixlibname);
-            if test.exists() { return test }
-        }
-    }
-    sess.fatal(&format!("could not find native static library `{}`, \
-                        perhaps an -L flag is missing?", name));
-}
-
 __build_diagnostic_array! { librustc_codegen_utils, DIAGNOSTICS }
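
Illustrative sketch (not part of the diff): of the traits introduced above, ModuleBufferMethods and ThinBufferMethods are the smallest surface a backend has to supply, since each only exposes the raw bytes of a serialized module. Assuming the two traits are re-exported from rustc_codegen_ssa::traits like the other traits in this series, a hypothetical backend whose buffers are plain byte vectors could satisfy both as follows; the VecBuffer type below is invented for illustration and is not part of the patch.

    use rustc_codegen_ssa::traits::{ModuleBufferMethods, ThinBufferMethods};

    // Hypothetical in-memory buffer for a backend that serializes modules to
    // byte vectors. Vec<u8> is Send + Sync, so the supertrait bounds hold.
    pub struct VecBuffer(pub Vec<u8>);

    impl ModuleBufferMethods for VecBuffer {
        fn data(&self) -> &[u8] {
            &self.0
        }
    }

    impl ThinBufferMethods for VecBuffer {
        fn data(&self) -> &[u8] {
            &self.0
        }
    }

Such a type would then be wired into a backend's WriteBackendMethods impl via its associated types, e.g. type ModuleBuffer = VecBuffer; and type ThinBuffer = VecBuffer;. Note also the blanket impls in traits/type_.rs above: because DerivedTypeMethods and TypeMethods are implemented for any type providing the base traits, a backend only writes BaseTypeMethods, MiscMethods and LayoutTypeMethods, and the derived type helpers come for free.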