Commit 937e8da3 authored by Mark Simulacrum

Purge FunctionContext

Parent 1be170b0
......@@ -54,7 +54,7 @@
use common::{C_bool, C_bytes_in_context, C_i32, C_uint};
use collector::{self, TransItemCollectionMode};
use common::{C_struct_in_context, C_u64, C_undef};
use common::{CrateContext, FunctionContext};
use common::CrateContext;
use common::{fulfill_obligation};
use common::{type_is_zero_size, val_ty};
use common;
......@@ -590,18 +590,17 @@ pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance
let fn_ty = FnType::new(ccx, abi, &sig, &[]);
let fcx = FunctionContext::new(ccx, lldecl);
let mir = ccx.tcx().item_mir(instance.def);
mir::trans_mir(&fcx, fn_ty, &mir, instance, &sig, abi);
mir::trans_mir(ccx, lldecl, fn_ty, &mir, instance, &sig, abi);
}
pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
disr: Disr,
llfndecl: ValueRef) {
attributes::inline(llfndecl, attributes::InlineAttr::Hint);
attributes::set_frame_pointer_elimination(ccx, llfndecl);
llfn: ValueRef) {
attributes::inline(llfn, attributes::InlineAttr::Hint);
attributes::set_frame_pointer_elimination(ccx, llfn);
let ctor_ty = ccx.tcx().item_type(def_id);
let ctor_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &ctor_ty);
......@@ -609,13 +608,12 @@ pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig());
let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
let fcx = FunctionContext::new(ccx, llfndecl);
let bcx = fcx.get_entry_block();
let bcx = Builder::entry_block(ccx, llfn);
if !fn_ty.ret.is_ignore() {
// But if there are no nested returns, we skip the indirection
// and have a single retslot
let dest = if fn_ty.ret.is_indirect() {
get_param(fcx.llfn, 0)
get_param(llfn, 0)
} else {
// We create an alloca to hold a pointer of type `ret.original_ty`
// which will hold the pointer to the right alloca which has the
......
......@@ -50,6 +50,10 @@ fn noname() -> *const c_char {
}
impl<'a, 'tcx> Builder<'a, 'tcx> {
pub fn entry_block(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef) -> Self {
Builder::new_block(ccx, llfn, "entry-block")
}
pub fn new_block<'b>(ccx: &'a CrateContext<'a, 'tcx>, llfn: ValueRef, name: &'b str) -> Self {
let builder = Builder::with_ccx(ccx);
let llbb = unsafe {
......
......@@ -23,9 +23,9 @@
use abi::{Abi, FnType};
use attributes;
use base;
use common::{
self, CrateContext, FunctionContext, SharedCrateContext
};
use builder::Builder;
use common::{self, CrateContext, SharedCrateContext};
use cleanup::CleanupScope;
use adt::MaybeSizedValue;
use consts;
use declare;
......@@ -329,8 +329,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
attributes::set_frame_pointer_elimination(ccx, lloncefn);
let orig_fn_ty = fn_ty;
let fcx = FunctionContext::new(ccx, lloncefn);
let mut bcx = fcx.get_entry_block();
let mut bcx = Builder::entry_block(ccx, lloncefn);
let callee = Callee {
data: Fn(llreffn),
......@@ -339,7 +338,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
// the first argument (`self`) will be the (by value) closure env.
let mut llargs = get_params(fcx.llfn);
let mut llargs = get_params(lloncefn);
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(bcx.ccx, &[]);
let self_idx = fn_ty.ret.is_indirect() as usize;
......@@ -364,7 +363,9 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
// Call the by-ref closure body with `self` in a cleanup scope,
// to drop `self` when the body returns, or in case it unwinds.
let self_scope = fcx.schedule_drop_mem(&bcx, MaybeSizedValue::sized(llenv), closure_ty);
let self_scope = CleanupScope::schedule_drop_mem(
&bcx, MaybeSizedValue::sized(llenv), closure_ty
);
let llfn = callee.reify(bcx.ccx);
let llret;
......@@ -488,10 +489,9 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
let llfn = declare::define_internal_fn(ccx, &function_name, tuple_fn_ty);
attributes::set_frame_pointer_elimination(ccx, llfn);
//
let fcx = FunctionContext::new(ccx, llfn);
let bcx = fcx.get_entry_block();
let bcx = Builder::entry_block(ccx, llfn);
let mut llargs = get_params(fcx.llfn);
let mut llargs = get_params(llfn);
let self_arg = llargs.remove(fn_ty.ret.is_indirect() as usize);
let llfnpointer = llfnpointer.unwrap_or_else(|| {
......
......@@ -22,7 +22,7 @@
use base;
use adt::MaybeSizedValue;
use builder::Builder;
use common::{FunctionContext, Funclet};
use common::Funclet;
use glue;
use type_::Type;
use rustc::ty::Ty;
......@@ -93,12 +93,12 @@ fn get_landing_pad<'a>(&self, bcx: &Builder<'a, 'tcx>) -> BasicBlockRef {
}
}
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
impl<'a, 'tcx> CleanupScope<'tcx> {
/// Schedules a (deep) drop of `val`, which is a pointer to an instance of `ty`
pub fn schedule_drop_mem(
&self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
) -> CleanupScope<'tcx> {
if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
let drop = DropValue {
val: val,
ty: ty,
......@@ -114,11 +114,11 @@ pub fn schedule_drop_mem(
/// and dropping the contents associated with that variant
/// *without* executing any associated drop implementation.
pub fn schedule_drop_adt_contents(
&self, bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
bcx: &Builder<'a, 'tcx>, val: MaybeSizedValue, ty: Ty<'tcx>
) -> CleanupScope<'tcx> {
// `if` below could be "!contents_needs_drop"; skipping drop
// is just an optimization, so sound to be conservative.
if !self.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
if !bcx.ccx.shared().type_needs_drop(ty) { return CleanupScope::noop(); }
let drop = DropValue {
val: val,
......@@ -128,10 +128,8 @@ pub fn schedule_drop_adt_contents(
CleanupScope::new(bcx, drop)
}
}
impl<'tcx> CleanupScope<'tcx> {
fn new<'a>(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
fn new(bcx: &Builder<'a, 'tcx>, drop_val: DropValue<'tcx>) -> CleanupScope<'tcx> {
CleanupScope {
cleanup: Some(drop_val),
landing_pad: if !bcx.ccx.sess().no_landing_pads() {
......@@ -149,7 +147,7 @@ pub fn noop() -> CleanupScope<'tcx> {
}
}
pub fn trans<'a>(self, bcx: &'a Builder<'a, 'tcx>) {
pub fn trans(self, bcx: &'a Builder<'a, 'tcx>) {
if let Some(cleanup) = self.cleanup {
cleanup.trans(None, &bcx);
}
......
......@@ -13,7 +13,7 @@
//! Code that is useful in various trans modules.
use llvm;
use llvm::{ValueRef, BasicBlockRef, ContextRef, TypeKind};
use llvm::{ValueRef, ContextRef, TypeKind};
use llvm::{True, False, Bool, OperandBundleDef};
use rustc::hir::def::Def;
use rustc::hir::def_id::DefId;
......@@ -36,7 +36,6 @@
use libc::{c_uint, c_char};
use std::borrow::Cow;
use std::iter;
use std::ffi::CString;
use syntax::ast;
use syntax::symbol::{Symbol, InternedString};
......@@ -219,71 +218,6 @@ pub fn from_ty(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
/// Function context. Every LLVM function we create will have one of these.
///
/// It owns the alloca insertion-point marker instruction; dropping the
/// context erases that marker (see the `Drop` impl below).
pub struct FunctionContext<'a, 'tcx: 'a> {
/// The ValueRef returned from a call to llvm::LLVMAddFunction; the
/// address of the first instruction in the sequence of
/// instructions for this function that will go in the .text
/// section of the executable we're generating.
pub llfn: ValueRef,
/// A marker for the place where we want to insert the function's static
/// allocas, so that LLVM will coalesce them into a single alloca call.
/// Set to `Some` by `FunctionContext::new`; `None` only transiently
/// during construction.
alloca_insert_pt: Option<ValueRef>,
/// This function's enclosing crate context.
pub ccx: &'a CrateContext<'a, 'tcx>,
}
impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
/// Create a function context for the given function.
/// Call FunctionContext::get_entry_block for the first entry block.
///
/// Appends an "entry-block" basic block to `llfndecl` and plants a dummy
/// load instruction at its start to serve as the insertion point for all
/// static allocas (so LLVM can coalesce them); the dummy is erased again
/// when the context is dropped.
pub fn new(ccx: &'a CrateContext<'a, 'tcx>, llfndecl: ValueRef) -> FunctionContext<'a, 'tcx> {
let mut fcx = FunctionContext {
llfn: llfndecl,
alloca_insert_pt: None,
ccx: ccx,
};
let entry_bcx = Builder::new_block(fcx.ccx, fcx.llfn, "entry-block");
entry_bcx.position_at_start(entry_bcx.llbb());
// Use a dummy instruction as the insertion point for all allocas.
// This is later removed in the drop of FunctionContext.
fcx.alloca_insert_pt = Some(entry_bcx.load(C_null(Type::i8p(ccx))));
fcx
}
/// Append a raw basic block named `name` to this function and return it,
/// without positioning any builder on it.
pub fn new_block(&self, name: &str) -> BasicBlockRef {
unsafe {
let name = CString::new(name).unwrap();
llvm::LLVMAppendBasicBlockInContext(
self.ccx.llcx(),
self.llfn,
name.as_ptr()
)
}
}
/// Append a basic block named `name` and return a Builder positioned on it.
pub fn build_new_block(&self, name: &str) -> Builder<'a, 'tcx> {
Builder::new_block(self.ccx, self.llfn, name)
}
/// Return a Builder positioned at the end of this function's first
/// (entry) basic block, as created by `FunctionContext::new`.
pub fn get_entry_block(&'a self) -> Builder<'a, 'tcx> {
let builder = Builder::with_ccx(self.ccx);
builder.position_at_end(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn) });
builder
}
}
impl<'a, 'tcx> Drop for FunctionContext<'a, 'tcx> {
    /// Erase the dummy load instruction that `FunctionContext::new` planted
    /// as the alloca insertion point, now that codegen for the function is
    /// finished.
    fn drop(&mut self) {
        // `new()` always sets `alloca_insert_pt` before handing the context
        // out, but guard instead of `unwrap()`ing: a panic inside `drop`
        // while already unwinding aborts the process outright.
        if let Some(pt) = self.alloca_insert_pt.take() {
            unsafe {
                llvm::LLVMInstructionEraseFromParent(pt);
            }
        }
    }
}
/// A structure representing an active landing pad for the duration of a basic
/// block.
///
......
......@@ -14,7 +14,7 @@
use llvm;
use llvm::debuginfo::{DIScope, DISubprogram};
use common::{CrateContext, FunctionContext};
use common::CrateContext;
use rustc::mir::{Mir, VisibilityScope};
use libc::c_uint;
......@@ -44,7 +44,7 @@ pub fn is_valid(&self) -> bool {
/// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
/// If debuginfo is disabled, the returned vector is empty.
pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &FunctionDebugContext)
pub fn create_mir_scopes(ccx: &CrateContext, mir: &Mir, debug_context: &FunctionDebugContext)
-> IndexVec<VisibilityScope, MirDebugScope> {
let null_scope = MirDebugScope {
scope_metadata: ptr::null_mut(),
......@@ -71,7 +71,7 @@ pub fn create_mir_scopes(fcx: &FunctionContext, mir: &Mir, debug_context: &Funct
// Instantiate all scopes.
for idx in 0..mir.visibility_scopes.len() {
let scope = VisibilityScope::new(idx);
make_mir_scope(fcx.ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes);
make_mir_scope(ccx, &mir, &has_variables, fn_metadata, scope, &mut scopes);
}
scopes
......
......@@ -45,7 +45,7 @@
//!
//! All private state used by the module is stored within either the
//! CrateDebugContext struct (owned by the CrateContext) or the
//! FunctionDebugContext (owned by the FunctionContext).
//! FunctionDebugContext (owned by the MirContext).
//!
//! This file consists of three conceptual sections:
//! 1. The public interface of the module
......
......@@ -25,6 +25,7 @@
use adt::{self, MaybeSizedValue};
use base::*;
use callee::Callee;
use cleanup::CleanupScope;
use common::*;
use machine::*;
use monomorphize;
......@@ -34,7 +35,6 @@
use type_::Type;
use value::Value;
use Disr;
use cleanup::CleanupScope;
use builder::Builder;
use syntax_pos::DUMMY_SP;
......@@ -174,8 +174,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
assert_eq!(g.ty(), get_drop_glue_type(ccx.shared(), g.ty()));
let (llfn, _) = ccx.drop_glues().borrow().get(&g).unwrap().clone();
let fcx = FunctionContext::new(ccx, llfn);
let mut bcx = fcx.get_entry_block();
let mut bcx = Builder::entry_block(ccx, llfn);
ccx.stats().n_glues_created.set(ccx.stats().n_glues_created.get() + 1);
// All glue functions take values passed *by alias*; this is a
......@@ -246,7 +245,7 @@ pub fn implement_drop_glue<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, g: DropGlueKi
// Issue #23611: schedule cleanup of contents, re-inspecting the
// discriminant (if any) in case of variant swap in drop code.
let contents_scope = if !shallow_drop {
fcx.schedule_drop_adt_contents(&bcx, ptr, t)
CleanupScope::schedule_drop_adt_contents(&bcx, ptr, t)
} else {
CleanupScope::noop()
};
......
......@@ -89,7 +89,6 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
llargs: &[ValueRef],
......@@ -127,7 +126,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None)
}
"try" => {
try_intrinsic(bcx, fcx, llargs[0], llargs[1], llargs[2], llresult);
try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult);
C_nil(ccx)
}
"breakpoint" => {
......@@ -689,7 +688,7 @@ fn memset_intrinsic<'a, 'tcx>(
fn try_intrinsic<'a, 'tcx>(
bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
ccx: &CrateContext,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
......@@ -701,7 +700,7 @@ fn try_intrinsic<'a, 'tcx>(
} else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, fcx, func, data, local_ptr, dest);
} else {
trans_gnu_try(bcx, fcx, func, data, local_ptr, dest);
trans_gnu_try(bcx, ccx, func, data, local_ptr, dest);
}
}
......@@ -713,12 +712,12 @@ fn try_intrinsic<'a, 'tcx>(
// writing, however, LLVM does not recommend the usage of these new instructions
// as the old ones are still more optimized.
fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
ccx: &CrateContext,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef) {
let llfn = get_rust_try_fn(fcx, &mut |bcx| {
let llfn = get_rust_try_fn(ccx, &mut |bcx| {
let ccx = bcx.ccx;
bcx.set_personality_fn(bcx.ccx.eh_personality());
......@@ -817,12 +816,12 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
fcx: &FunctionContext,
ccx: &CrateContext,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef) {
let llfn = get_rust_try_fn(fcx, &mut |bcx| {
let llfn = get_rust_try_fn(ccx, &mut |bcx| {
let ccx = bcx.ccx;
// Translates the shims described above:
......@@ -874,13 +873,12 @@ fn trans_gnu_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
fn gen_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
name: &str,
inputs: Vec<Ty<'tcx>>,
output: Ty<'tcx>,
trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
let sig = ccx.tcx().mk_fn_sig(inputs.into_iter(), output, false);
let rust_fn_ty = ccx.tcx().mk_fn_ptr(ccx.tcx().mk_bare_fn(ty::BareFnTy {
......@@ -889,8 +887,8 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
sig: ty::Binder(sig)
}));
let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
let fcx = FunctionContext::new(ccx, llfn);
trans(fcx.get_entry_block());
let bcx = Builder::entry_block(ccx, llfn);
trans(bcx);
llfn
}
......@@ -898,10 +896,9 @@ fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
fn get_rust_try_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
trans: &mut for<'b> FnMut(Builder<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
if let Some(llfn) = ccx.rust_try_fn().get() {
return llfn;
}
......@@ -915,7 +912,7 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
sig: ty::Binder(tcx.mk_fn_sig(iter::once(i8p), tcx.mk_nil(), false)),
}));
let output = tcx.types.i32;
let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
let rust_try = gen_fn(ccx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
ccx.rust_try_fn().set(Some(rust_try));
return rust_try
}
......
......@@ -76,10 +76,9 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
let llfn = declare::define_internal_fn(ccx, &function_name, callee.ty);
attributes::set_frame_pointer_elimination(ccx, llfn);
let fcx = FunctionContext::new(ccx, llfn);
let bcx = fcx.get_entry_block();
let bcx = Builder::entry_block(ccx, llfn);
let mut llargs = get_params(fcx.llfn);
let mut llargs = get_params(llfn);
let fn_ret = callee.ty.fn_ret();
let fn_ty = callee.direct_fn_type(ccx, &[]);
......
......@@ -302,7 +302,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, &bcx, target);
let panic_block = self.fcx.build_new_block("panic");
let panic_block = bcx.build_new_block("panic");
if expected {
bcx.cond_br(cond, lltarget, panic_block.llbb());
} else {
......@@ -546,7 +546,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock,
bug!("Cannot use direct operand with an intrinsic call")
};
trans_intrinsic_call(&bcx, self.fcx, callee.ty, &fn_ty, &llargs, dest,
trans_intrinsic_call(&bcx, callee.ty, &fn_ty, &llargs, dest,
terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
......@@ -793,13 +793,13 @@ fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> BasicBlockRef {
let target = self.build_block(target_bb);
let bcx = self.fcx.build_new_block("cleanup");
let bcx = target.build_new_block("cleanup");
self.landing_pads[target_bb] = Some(bcx.llbb());
let ccx = bcx.ccx;
let llpersonality = self.ccx.eh_personality();
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn);
bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot, None);
......@@ -809,7 +809,7 @@ fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> BasicBlockRef {
fn unreachable_block(&mut self) -> BasicBlockRef {
self.unreachable_block.unwrap_or_else(|| {
let bl = self.fcx.build_new_block("unreachable");
let bl = self.build_block(mir::START_BLOCK).build_new_block("unreachable");
bl.unreachable();
self.unreachable_block = Some(bl.llbb());
bl.llbb()
......@@ -817,7 +817,7 @@ fn unreachable_block(&mut self) -> BasicBlockRef {
}
pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'tcx> {
let builder = Builder::with_ccx(self.fcx.ccx);
let builder = Builder::with_ccx(self.ccx);
builder.position_at_end(self.blocks[bb]);
builder
}
......
......@@ -20,7 +20,7 @@
use session::config::FullDebugInfo;
use base;
use builder::Builder;
use common::{self, CrateContext, FunctionContext, C_null, Funclet};
use common::{self, CrateContext, C_null, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::{self, Instance};
use abi::FnType;
......@@ -31,6 +31,7 @@
use syntax::abi::Abi;
use std::iter;
use std::ffi::CString;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
......@@ -49,7 +50,7 @@ pub struct MirContext<'a, 'tcx:'a> {
debug_context: debuginfo::FunctionDebugContext,
fcx: &'a common::FunctionContext<'a, 'tcx>,
llfn: ValueRef,
ccx: &'a CrateContext<'a, 'tcx>,
......@@ -199,7 +200,8 @@ fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>,
///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'a, 'tcx: 'a>(
fcx: &'a FunctionContext<'a, 'tcx>,
ccx: &'a CrateContext<'a, 'tcx>,
llfn: ValueRef,
fn_ty: FnType,
mir: &'a Mir<'tcx>,
instance: Instance<'tcx>,
......@@ -208,29 +210,36 @@ pub fn trans_mir<'a, 'tcx: 'a>(
) {
debug!("fn_ty: {:?}", fn_ty);
let debug_context =
debuginfo::create_function_debug_context(fcx.ccx, instance, sig, abi, fcx.llfn, mir);
let bcx = fcx.get_entry_block();
debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfn, mir);
let bcx = Builder::entry_block(ccx, llfn);
let cleanup_kinds = analyze::cleanup_kinds(&mir);
// Allocate a `Block` for every basic block
let block_bcxs: IndexVec<mir::BasicBlock, BasicBlockRef> =
mir.basic_blocks().indices().map(|bb| {
if bb == mir::START_BLOCK {
fcx.new_block("start")
let name = if bb == mir::START_BLOCK {
CString::new("start").unwrap()
} else {
fcx.new_block(&format!("{:?}", bb))
CString::new(format!("{:?}", bb)).unwrap()
};
unsafe {
llvm::LLVMAppendBasicBlockInContext(
ccx.llcx(),
llfn,
name.as_ptr()
)
}
}).collect();
// Compute debuginfo scopes from MIR scopes.
let scopes = debuginfo::create_mir_scopes(fcx, mir, &debug_context);
let scopes = debuginfo::create_mir_scopes(ccx, mir, &debug_context);
let mut mircx = MirContext {
mir: mir,
fcx: fcx,
llfn: llfn,
fn_ty: fn_ty,
ccx: fcx.ccx,
ccx: ccx,
llpersonalityslot: None,
blocks: block_bcxs,
unreachable_block: None,
......@@ -281,7 +290,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
// Temporary or return pointer
if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return pointer) -> lvalue", local);
let llretptr = llvm::get_param(fcx.llfn, 0);
let llretptr = llvm::get_param(llfn, 0);
LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
} else if lvalue_locals.contains(local.index()) {
debug!("alloc: {:?} -> lvalue", local);
......@@ -319,7 +328,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
if let CleanupKind::Funclet = *cleanup_kind {
let bcx = mircx.build_block(bb);
bcx.set_personality_fn(mircx.ccx.eh_personality());
if base::wants_msvc_seh(fcx.ccx.sess()) {
if base::wants_msvc_seh(ccx.sess()) {
return Some(Funclet::new(bcx.cleanup_pad(None, &[])));
}
}
......@@ -358,7 +367,6 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
lvalue_locals: &BitVector)
-> Vec<LocalRef<'tcx>> {
let mir = mircx.mir;
let fcx = mircx.fcx;
let tcx = bcx.ccx.tcx();
let mut idx = 0;
let mut llarg_idx = mircx.fn_ty.ret.is_indirect() as usize;
......@@ -433,7 +441,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
if arg.pad.is_some() {
llarg_idx += 1;
}
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
llarg
} else if !lvalue_locals.contains(local.index()) &&
......@@ -449,13 +457,13 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
if arg.pad.is_some() {
llarg_idx += 1;
}
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
let meta = &mircx.fn_ty.args[idx];
idx += 1;
assert_eq!((meta.cast, meta.pad), (None, None));
let llmeta = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
OperandValue::Pair(llarg, llmeta)
} else {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register to comment