Commit ba26efb6 authored by Simonas Kazlauskas

Implement filling drop in MIR

Hopefully the author caught all the cases. For the mir_dynamic_drops_3 test case, the ratio of
memsets to other instructions is 12%. On the other hand, MIR no longer double-drops, at least
for the provided test cases.
Parent be7196a7
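
For context, here is a minimal sketch (not rustc code) of what “filling drop” means: once a value has been dropped or moved out of its slot, the slot is overwritten with a sentinel byte, so any drop glue that runs later can tell the slot no longer holds a live value. The helper and the exact DTOR_DONE value below are illustrative.

use std::{mem, ptr};

// Hypothetical stand-in for trans::adt::DTOR_DONE, the fill byte written
// over dead slots (the precise value is a rustc implementation detail).
const DTOR_DONE: u8 = 0x1d;

// Rough model of the `drop_fill` this commit introduces: memset the slot
// with the sentinel so later drop glue treats it as already dropped.
unsafe fn drop_fill<T>(slot: *mut T) {
    ptr::write_bytes(slot as *mut u8, DTOR_DONE, mem::size_of::<T>());
}

fn main() {
    let mut x = 0u64;
    unsafe { drop_fill(&mut x) };
    assert_eq!(x, 0x1d1d_1d1d_1d1d_1d1d);
}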
......@@ -2165,6 +2165,8 @@ pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char,
NumInputs: c_uint)
-> OperandBundleDefRef;
pub fn LLVMRustFreeOperandBundleDef(Bundle: OperandBundleDefRef);
pub fn LLVMRustPositionBuilderAtStart(B: BuilderRef, BB: BasicBlockRef);
}
// LLVM requires symbols from this library, but apparently they're not printed
......
......@@ -1298,22 +1298,29 @@ pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'
fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
let _icx = push_ctxt("memfill");
let ccx = b.ccx;
let llty = type_of::type_of(ccx, ty);
let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
let llzeroval = C_u8(ccx, byte);
let size = machine::llsize_of(ccx, llty);
let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
let volatile = C_bool(ccx, false);
b.call(llintrinsicfn,
&[llptr, llzeroval, size, align, volatile],
None, None);
call_memset(b, llptr, llzeroval, size, align, false);
}
pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
ptr: ValueRef,
fill_byte: ValueRef,
size: ValueRef,
align: ValueRef,
volatile: bool) {
let ccx = b.ccx;
let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
let volatile = C_bool(ccx, volatile);
b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None, None);
}
/// In general, when we create a scratch value in an alloca, the
/// creator may not know if the block (that initializes the scratch
/// with the desired value) actually dominates the cleanup associated
......
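The hunk above splits the intrinsic lookup out of memfill into the reusable call_memset. The intrinsic name is keyed on the target pointer width; a small self-contained sketch of that derivation (mirroring the format! call in the diff):

// 64-bit targets resolve to "llvm.memset.p0i8.i64"; 32-bit targets to
// "llvm.memset.p0i8.i32".
fn memset_intrinsic_key(target_pointer_width: &str) -> String {
    format!("llvm.memset.p0i8.i{}", target_pointer_width)
}

fn main() {
    assert_eq!(memset_intrinsic_key("64"), "llvm.memset.p0i8.i64");
}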
......@@ -104,6 +104,12 @@ pub fn position_at_end(&self, llbb: BasicBlockRef) {
}
}
pub fn position_at_start(&self, llbb: BasicBlockRef) {
unsafe {
llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
}
}
pub fn ret_void(&self) {
self.count_insn("retvoid");
unsafe {
......
......@@ -735,6 +735,15 @@ pub fn map_block<F>(self, f: F) -> Self
BlockAndBuilder::new(bcx, owned_builder)
}
pub fn at_start<F, R>(&self, f: F) -> R
where F: FnOnce(&BlockAndBuilder<'blk, 'tcx>) -> R
{
self.position_at_start(self.bcx.llbb);
let r = f(self);
self.position_at_end(self.bcx.llbb);
r
}
// Methods delegated to bcx
pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
......
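The at_start combinator above temporarily moves the builder's insertion point to the front of the block, runs the closure, and restores it to the end. A self-contained model of the pattern (plain Rust, not rustc's builder types):

// Toy block whose "instructions" are strings; `at_start` emits into a
// scratch buffer and splices it at the front, so new instructions land
// before everything already in the block.
struct Block {
    insns: Vec<String>,
}

impl Block {
    fn at_start<R>(&mut self, f: impl FnOnce(&mut Vec<String>) -> R) -> R {
        let mut prefix = Vec::new();
        let r = f(&mut prefix);
        prefix.append(&mut self.insns);
        self.insns = prefix;
        r
    }
}

fn main() {
    let mut bb = Block { insns: vec!["call @f".into(), "ret".into()] };
    bb.at_start(|ins| ins.push("memset %slot".into()));
    assert_eq!(bb.insns[0], "memset %slot");
}

Later hunks use the real combinator in exactly this way, e.g. self.bcx(target).at_start(|bcx| drop::drop_fill(bcx, lvalue.llval, ty)) to prepend a drop-fill to an invoke's normal-return block.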
......@@ -20,11 +20,11 @@
use trans::debuginfo::DebugLoc;
use trans::Disr;
use trans::foreign;
use trans::glue;
use trans::type_of;
use trans::glue;
use trans::type_::Type;
use super::MirContext;
use super::{MirContext, drop};
use super::operand::OperandValue::{FatPtr, Immediate, Ref};
use super::operand::OperandRef;
......@@ -188,8 +188,10 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
unwind.llbb(),
cleanup_bundle.as_ref(),
None);
self.bcx(target).at_start(|bcx| drop::drop_fill(bcx, lvalue.llval, ty));
} else {
bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref(), None);
drop::drop_fill(&bcx, lvalue.llval, ty);
funclet_br(bcx, self.llblock(target));
}
}
......@@ -250,59 +252,41 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
landingpad.llbb(),
cleanup_bundle.as_ref(),
Some(attrs));
landingpad.at_start(|bcx| for op in args {
self.set_operand_dropped(bcx, op);
});
},
(false, &Some(cleanup), &Some((_, success))) => {
let cleanup = self.bcx(cleanup);
let landingpad = self.make_landing_pad(cleanup);
let (target, postinvoke) = if must_copy_dest {
(self.fcx.new_block("", None).build(), Some(self.bcx(success)))
} else {
(self.bcx(success), None)
};
let invokeret = bcx.invoke(callee.immediate(),
&llargs[..],
target.llbb(),
self.llblock(success),
landingpad.llbb(),
cleanup_bundle.as_ref(),
Some(attrs));
if let Some(postinvoketarget) = postinvoke {
// We translate the copy into a temporary block. The temporary block is
// necessary because the current block has already been terminated (by
// `invoke`) and we cannot really translate into the target block
// because:
// * The target block may have more than a single predecessor;
// * Some LLVM insns cannot have a preceding store insn (phi,
// cleanuppad), and adding/prepending the store now may render
// those other instructions invalid.
//
// NB: This approach may still break some LLVM code. For example if the
// target block starts with a `phi` (which may only match on immediate
// predecessors), it cannot know about this temporary block, thus
// resulting in invalid code:
//
// this:
// …
// %0 = …
// %1 = invoke to label %temp …
// temp:
// store ty %1, ty* %dest
// br label %actualtargetblock
// actualtargetblock: ; preds: %temp, …
// phi … [%this, …], [%0, …] ; ERROR: phi may only match on
// ; immediate predecessors
if must_copy_dest {
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
target.with_block(|target| {
base::store_ty(target, invokeret, ret_dest.llval, ret_ty);
});
target.br(postinvoketarget.llbb());
// We translate the copy straight into the beginning of the target
// block.
self.bcx(success).at_start(|bcx| bcx.with_block( |bcx| {
base::store_ty(bcx, invokeret, ret_dest.llval, ret_ty);
}));
}
self.bcx(success).at_start(|bcx| for op in args {
self.set_operand_dropped(bcx, op);
});
landingpad.at_start(|bcx| for op in args {
self.set_operand_dropped(bcx, op);
});
},
(false, _, &None) => {
bcx.call(callee.immediate(),
&llargs[..],
cleanup_bundle.as_ref(),
Some(attrs));
// no need to drop args, because the call never returns
bcx.unreachable();
}
(false, _, &Some((_, target))) => {
......@@ -317,6 +301,9 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
});
}
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
}
// Foreign functions
......@@ -333,6 +320,9 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
debugloc)
});
if let Some((_, target)) = *destination {
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
}
},
......@@ -388,7 +378,7 @@ fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, Operand
let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
let cleanup_pad = if use_funclets {
bcx.set_personality_fn(self.fcx.eh_personality());
Some(bcx.cleanup_pad(None, &[]))
bcx.at_start(|bcx| Some(bcx.cleanup_pad(None, &[])))
} else {
None
};
......@@ -416,7 +406,7 @@ fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
self.blocks[bb.index()].build()
}
fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
pub fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
self.blocks[bb.index()].llbb
}
}
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use rustc::middle::ty::Ty;
use trans::adt;
use trans::base;
use trans::common::{self, BlockAndBuilder};
use trans::machine;
use trans::type_of;
use trans::type_::Type;
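/// Overwrite the memory behind `value` with the DTOR_DONE fill byte,
/// marking the slot as holding no live value so drop glue will skip it.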
pub fn drop_fill<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, value: ValueRef, ty: Ty<'tcx>) {
let llty = type_of::type_of(bcx.ccx(), ty);
let llptr = bcx.pointercast(value, Type::i8(bcx.ccx()).ptr_to());
let filling = common::C_u8(bcx.ccx(), adt::DTOR_DONE);
let size = machine::llsize_of(bcx.ccx(), llty);
let align = common::C_u32(bcx.ccx(), machine::llalign_of_min(bcx.ccx(), llty));
base::call_memset(&bcx, llptr, filling, size, align, false);
}
......@@ -17,6 +17,7 @@
use trans::common::{self, BlockAndBuilder};
use trans::machine;
use trans::type_of;
use trans::mir::drop;
use llvm;
use trans::Disr;
......@@ -48,6 +49,7 @@ pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
{
assert!(!ty.has_erasable_regions());
let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
drop::drop_fill(bcx, lltemp, ty);
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
}
}
......
......@@ -197,6 +197,7 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mod block;
mod constant;
mod did;
mod drop;
mod lvalue;
mod operand;
mod rvalue;
......
......@@ -16,8 +16,9 @@
use trans::common::{self, Block, BlockAndBuilder};
use trans::datum;
use trans::Disr;
use trans::glue;
use super::{MirContext, TempRef};
use super::{MirContext, TempRef, drop};
use super::lvalue::LvalueRef;
/// The representation of a Rust value. The enum variant is in fact
......@@ -158,31 +159,13 @@ pub fn trans_operand(&mut self,
}
}
pub fn trans_operand_into(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
lldest: ValueRef,
operand: &mir::Operand<'tcx>)
{
debug!("trans_operand_into(lldest={}, operand={:?})",
bcx.val_to_string(lldest),
operand);
// FIXME: consider not copying constants through the
// stack.
let o = self.trans_operand(bcx, operand);
self.store_operand(bcx, lldest, o);
}
pub fn store_operand(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
lldest: ValueRef,
operand: OperandRef<'tcx>)
{
debug!("store_operand: operand={}", operand.repr(bcx));
bcx.with_block(|bcx| {
self.store_operand_direct(bcx, lldest, operand)
})
bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand))
}
pub fn store_operand_direct(&mut self,
......@@ -245,4 +228,30 @@ pub fn trans_operand_untupled(&mut self,
}), ty)
}).collect()
}
pub fn set_operand_dropped(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>) {
match *operand {
mir::Operand::Constant(_) => return,
mir::Operand::Consume(ref lvalue) => {
if let mir::Lvalue::Temp(idx) = *lvalue {
if let TempRef::Operand(..) = self.temps[idx as usize] {
// All lvalues which have an associated drop are promoted to an alloca
// beforehand. If this is an operand, it is safe to say this is never
// dropped and there’s no reason for us to zero this out at all.
return
}
}
let lvalue = self.trans_lvalue(bcx, lvalue);
let ty = lvalue.ty.to_ty(bcx.tcx());
if !glue::type_needs_drop(bcx.tcx(), ty) ||
common::type_is_fat_ptr(bcx.tcx(), ty) {
return
} else {
drop::drop_fill(bcx, lvalue.llval, ty);
}
}
}
}
}
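At the source level, the situation set_operand_dropped handles looks roughly like this (a sketch; the drop-fill is emitted in the caller's MIR, not written by the user):

fn consume(v: Vec<u8>) {
    // `v` is dropped here, at the end of the callee.
}

fn caller() {
    let v = vec![1u8, 2, 3];
    // `v` is moved into the call, so the drop obligation transfers to
    // `consume`; MIR drop-fills the caller's slot for `v` with DTOR_DONE
    // so the caller's drop glue cannot free the allocation a second time.
    consume(v);
}

fn main() {
    caller();
}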
......@@ -42,10 +42,14 @@ pub fn trans_rvalue(&mut self,
rvalue);
match *rvalue {
mir::Rvalue::Use(ref operand) => {
self.trans_operand_into(&bcx, dest.llval, operand);
bcx
}
mir::Rvalue::Use(ref operand) => {
// FIXME: consider not copying constants through the stack. (Fixable by translating
// constants into OperandValue::Ref; why don't we do that yet if we don't?)
let tr_operand = self.trans_operand(&bcx, operand);
self.store_operand(&bcx, dest.llval, tr_operand);
self.set_operand_dropped(&bcx, operand);
bcx
}
mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
......@@ -89,15 +93,17 @@ pub fn trans_rvalue(&mut self,
}
mir::Rvalue::Repeat(ref elem, ref count) => {
let elem = self.trans_operand(&bcx, elem);
let tr_elem = self.trans_operand(&bcx, elem);
let size = self.trans_constval(&bcx, &count.value, count.ty).immediate();
bcx.map_block(|block| {
let bcx = bcx.map_block(|block| {
let base = expr::get_dataptr(block, dest.llval);
tvec::iter_vec_raw(block, base, elem.ty, size, |block, llslot, _| {
self.store_operand_direct(block, llslot, elem);
tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
self.store_operand_direct(block, llslot, tr_elem);
block
})
})
});
self.set_operand_dropped(&bcx, elem);
bcx
}
mir::Rvalue::Aggregate(ref kind, ref operands) => {
......@@ -117,6 +123,7 @@ pub fn trans_rvalue(&mut self,
adt::trans_field_ptr(bcx, &repr, val, disr, i)
});
self.store_operand(&bcx, lldest_i, op);
self.set_operand_dropped(&bcx, operand);
}
}
},
......@@ -130,6 +137,7 @@ pub fn trans_rvalue(&mut self,
// not be structs but arrays.
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, op);
self.set_operand_dropped(&bcx, operand);
}
}
}
......@@ -179,11 +187,6 @@ pub fn trans_rvalue_operand(&mut self,
assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
match *rvalue {
mir::Rvalue::Use(ref operand) => {
let operand = self.trans_operand(&bcx, operand);
(bcx, operand)
}
mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => {
let operand = self.trans_operand(&bcx, operand);
debug!("cast operand is {}", operand.repr(&bcx));
......@@ -426,6 +429,7 @@ pub fn trans_rvalue_operand(&mut self,
(bcx, operand)
}
mir::Rvalue::Use(..) |
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } |
......@@ -543,7 +547,6 @@ pub fn trans_scalar_binop(&mut self,
pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Use(..) | // (*)
mir::Rvalue::Ref(..) |
mir::Rvalue::Len(..) |
mir::Rvalue::Cast(..) | // (*)
......@@ -551,6 +554,7 @@ pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Box(..) =>
true,
mir::Rvalue::Use(..) | // (**)
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } |
......@@ -559,4 +563,6 @@ pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
}
// (*) this is only true if the type is suitable
// (**) we need to zero out the old value before moving, so we are restricted to either
// ensuring that all users of `Use` handle this themselves or not allowing an operand to be
// “created” for it.
}
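The (**) restriction is ordinary move semantics; a small example of the double drop it guards against:

fn main() {
    let a = String::from("x");
    // `Rvalue::Use` is a move: after this, `a`'s slot is logically dead
    // and must be drop-filled, otherwise drop glue would free the same
    // heap allocation once through `a` and once through `b`.
    let b = a;
    drop(b); // exactly one drop
}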
......@@ -1167,3 +1167,8 @@ LLVMRustBuildInvoke(LLVMBuilderRef B,
return LLVMBuildInvoke(B, Fn, Args, NumArgs, Then, Catch, Name);
}
#endif
extern "C" void LLVMRustPositionBuilderAtStart(LLVMBuilderRef B, LLVMBasicBlockRef BB) {
auto point = unwrap(BB)->getFirstInsertionPt();
unwrap(B)->SetInsertPoint(unwrap(BB), point);
}