提交 f97c4115 编写于 作者: B bors

Auto merge of #33622 - arielb1:elaborate-drops, r=nikomatsakis

[MIR] non-zeroing drop

This enables non-zeroing drop through stack flags for MIR.

Fixes #30380.
Fixes #5016.
......@@ -624,6 +624,24 @@ pub fn normalize_associated_type<T>(self, value: &T) -> T
value.trans_normalize(&infcx)
})
}
/// Normalizes associated-type projections in `value` under the given
/// parameter environment `env`.
///
/// Regions are erased first (trans works with erased regions); if the
/// erased value contains no projection types it is returned unchanged.
/// Otherwise a fresh inference context is entered with
/// `ProjectionMode::Any` and `trans_normalize` resolves the projections.
pub fn normalize_associated_type_in_env<T>(
self, value: &T, env: &'a ty::ParameterEnvironment<'tcx>
) -> T
where T: TransNormalize<'tcx>
{
debug!("normalize_associated_type_in_env(t={:?})", value);
let value = self.erase_regions(value);
// Fast path: nothing to normalize.
if !value.has_projection_types() {
return value;
}
self.infer_ctxt(None, Some(env.clone()), ProjectionMode::Any).enter(|infcx| {
value.trans_normalize(&infcx)
})
}
}
impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> {
......
......@@ -163,6 +163,7 @@ fn item_type<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
-> ty::TypeScheme<'tcx>;
fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap<DefId>>;
fn item_name(&self, def: DefId) -> ast::Name;
fn opt_item_name(&self, def: DefId) -> Option<ast::Name>;
fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
-> ty::GenericPredicates<'tcx>;
fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
......@@ -345,6 +346,7 @@ fn visible_parent_map<'a>(&'a self) -> ::std::cell::RefMut<'a, DefIdMap<DefId>>
bug!("visible_parent_map")
}
fn item_name(&self, def: DefId) -> ast::Name { bug!("item_name") }
fn opt_item_name(&self, def: DefId) -> Option<ast::Name> { bug!("opt_item_name") }
fn item_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
-> ty::GenericPredicates<'tcx> { bug!("item_predicates") }
fn item_super_predicates<'a>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, def: DefId)
......
......@@ -330,11 +330,19 @@ pub enum TerminatorKind<'tcx> {
/// Drop the Lvalue
Drop {
value: Lvalue<'tcx>,
location: Lvalue<'tcx>,
target: BasicBlock,
unwind: Option<BasicBlock>
},
/// Drop the Lvalue and assign the new value over it
DropAndReplace {
location: Lvalue<'tcx>,
value: Operand<'tcx>,
target: BasicBlock,
unwind: Option<BasicBlock>,
},
/// Block ends with a call of a converging function
Call {
/// The function that’s being called
......@@ -373,8 +381,14 @@ pub fn successors(&self) -> Cow<[BasicBlock]> {
slice::ref_slice(t).into_cow(),
Call { destination: None, cleanup: Some(ref c), .. } => slice::ref_slice(c).into_cow(),
Call { destination: None, cleanup: None, .. } => (&[]).into_cow(),
Drop { target, unwind: Some(unwind), .. } => vec![target, unwind].into_cow(),
Drop { ref target, .. } => slice::ref_slice(target).into_cow(),
DropAndReplace { target, unwind: Some(unwind), .. } |
Drop { target, unwind: Some(unwind), .. } => {
vec![target, unwind].into_cow()
}
DropAndReplace { ref target, unwind: None, .. } |
Drop { ref target, unwind: None, .. } => {
slice::ref_slice(target).into_cow()
}
}
}
......@@ -393,8 +407,12 @@ pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> {
Call { destination: Some((_, ref mut t)), cleanup: None, .. } => vec![t],
Call { destination: None, cleanup: Some(ref mut c), .. } => vec![c],
Call { destination: None, cleanup: None, .. } => vec![],
DropAndReplace { ref mut target, unwind: Some(ref mut unwind), .. } |
Drop { ref mut target, unwind: Some(ref mut unwind), .. } => vec![target, unwind],
Drop { ref mut target, .. } => vec![target]
DropAndReplace { ref mut target, unwind: None, .. } |
Drop { ref mut target, unwind: None, .. } => {
vec![target]
}
}
}
}
......@@ -461,7 +479,9 @@ pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
SwitchInt { discr: ref lv, .. } => write!(fmt, "switchInt({:?})", lv),
Return => write!(fmt, "return"),
Resume => write!(fmt, "resume"),
Drop { ref value, .. } => write!(fmt, "drop({:?})", value),
Drop { ref location, .. } => write!(fmt, "drop({:?})", location),
DropAndReplace { ref location, ref value, .. } =>
write!(fmt, "replace({:?} <- {:?})", location, value),
Call { ref func, ref args, ref destination, .. } => {
if let Some((ref destination, _)) = *destination {
write!(fmt, "{:?} = ", destination)?;
......@@ -506,8 +526,12 @@ pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
Call { destination: Some(_), cleanup: None, .. } => vec!["return".into_cow()],
Call { destination: None, cleanup: Some(_), .. } => vec!["unwind".into_cow()],
Call { destination: None, cleanup: None, .. } => vec![],
DropAndReplace { unwind: None, .. } |
Drop { unwind: None, .. } => vec!["return".into_cow()],
Drop { .. } => vec!["return".into_cow(), "unwind".into_cow()],
DropAndReplace { unwind: Some(_), .. } |
Drop { unwind: Some(_), .. } => {
vec!["return".into_cow(), "unwind".into_cow()]
}
}
}
}
......@@ -918,7 +942,7 @@ fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result {
ppaux::parameterized(fmt, substs, variant_def.did,
ppaux::Ns::Value, &[],
|tcx| {
tcx.lookup_item_type(variant_def.did).generics
Some(tcx.lookup_item_type(variant_def.did).generics)
})?;
match variant_def.kind() {
......@@ -1010,8 +1034,9 @@ fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::Literal::*;
match *self {
Item { def_id, substs } => {
ppaux::parameterized(fmt, substs, def_id, ppaux::Ns::Value, &[],
|tcx| tcx.lookup_item_type(def_id).generics)
ppaux::parameterized(
fmt, substs, def_id, ppaux::Ns::Value, &[],
|tcx| Some(tcx.lookup_item_type(def_id).generics))
}
Value { ref value } => {
write!(fmt, "const ")?;
......
......@@ -394,10 +394,20 @@ fn super_terminator_kind(&mut self,
TerminatorKind::Return => {
}
TerminatorKind::Drop { ref $($mutability)* value,
TerminatorKind::Drop { ref $($mutability)* location,
target,
unwind } => {
self.visit_lvalue(value, LvalueContext::Drop);
self.visit_lvalue(location, LvalueContext::Drop);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
TerminatorKind::DropAndReplace { ref $($mutability)* location,
ref $($mutability)* value,
target,
unwind } => {
self.visit_lvalue(location, LvalueContext::Drop);
self.visit_operand(value);
self.visit_branch(block, target);
unwind.map(|t| self.visit_branch(block, t));
}
......
......@@ -13,6 +13,7 @@
use hir::def_id::{DefId, CRATE_DEF_INDEX};
use ty::{self, Ty, TyCtxt};
use syntax::ast;
use syntax::parse::token;
use std::cell::Cell;
......@@ -138,7 +139,8 @@ pub fn try_push_visible_item_path<T>(self, buffer: &mut T, external_def_id: DefI
}
}
cur_path.push(self.sess.cstore.item_name(cur_def));
cur_path.push(self.sess.cstore.opt_item_name(cur_def).unwrap_or_else(||
token::intern("<unnamed>")));
match visible_parent_map.get(&cur_def) {
Some(&def) => cur_def = def,
None => return false,
......
......@@ -2503,6 +2503,18 @@ pub fn lookup_item_type(self, did: DefId) -> TypeScheme<'gcx> {
|| self.sess.cstore.item_type(self.global_tcx(), did))
}
/// Like `lookup_item_type`, but returns `None` instead of computing the
/// type scheme when `did` is a local item that is not yet in the cache.
///
/// Foreign (cross-crate) items are always resolvable via crate metadata.
pub fn opt_lookup_item_type(self, did: DefId) -> Option<TypeScheme<'gcx>> {
    // A shared borrow suffices for a read-only cache lookup; the
    // original `borrow_mut()` would panic at runtime if any shared
    // borrow of `tcache` were still outstanding.
    if let Some(scheme) = self.tcache.borrow().get(&did) {
        return Some(scheme.clone());
    }
    if did.krate == LOCAL_CRATE {
        // Local item whose type has not been computed/cached yet.
        None
    } else {
        // Foreign items can always be loaded from the crate store.
        Some(self.sess.cstore.item_type(self.global_tcx(), did))
    }
}
/// Given the did of a trait, returns its canonical trait ref.
pub fn lookup_trait_def(self, did: DefId) -> &'gcx TraitDef<'gcx> {
lookup_locally_or_in_crate_store(
......
......@@ -69,15 +69,12 @@ pub enum Ns {
Value
}
fn number_of_supplied_defaults<'a, 'gcx, 'tcx, GG>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
substs: &subst::Substs,
space: subst::ParamSpace,
get_generics: GG)
-> usize
where GG: FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> ty::Generics<'tcx>
fn number_of_supplied_defaults<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
substs: &subst::Substs,
space: subst::ParamSpace,
generics: ty::Generics<'tcx>)
-> usize
{
let generics = get_generics(tcx);
let has_self = substs.self_ty().is_some();
let ty_params = generics.types.get_slice(space);
let tps = substs.types.get_slice(space);
......@@ -115,7 +112,8 @@ pub fn parameterized<GG>(f: &mut fmt::Formatter,
projections: &[ty::ProjectionPredicate],
get_generics: GG)
-> fmt::Result
where GG: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> ty::Generics<'tcx>
where GG: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>)
-> Option<ty::Generics<'tcx>>
{
if let (Ns::Value, Some(self_ty)) = (ns, substs.self_ty()) {
write!(f, "<{} as ", self_ty)?;
......@@ -176,13 +174,12 @@ pub fn parameterized<GG>(f: &mut fmt::Formatter,
let num_supplied_defaults = if verbose {
0
} else {
// It is important to execute this conditionally, only if -Z
// verbose is false. Otherwise, debug logs can sometimes cause
// ICEs trying to fetch the generics early in the pipeline. This
// is kind of a hacky workaround in that -Z verbose is required to
// avoid those ICEs.
ty::tls::with(|tcx| {
number_of_supplied_defaults(tcx, substs, subst::TypeSpace, get_generics)
if let Some(generics) = get_generics(tcx) {
number_of_supplied_defaults(tcx, substs, subst::TypeSpace, generics)
} else {
0
}
})
};
......@@ -312,7 +309,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
trait_ref.def_id,
Ns::Type,
projection_bounds,
|tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone())
|tcx| Some(tcx.lookup_trait_def(trait_ref.def_id).generics.clone()))
}
}
......@@ -814,7 +811,7 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
impl<'tcx> fmt::Display for ty::TraitRef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
parameterized(f, self.substs, self.def_id, Ns::Type, &[],
|tcx| tcx.lookup_trait_def(self.def_id).generics.clone())
|tcx| Some(tcx.lookup_trait_def(self.def_id).generics.clone()))
}
}
......@@ -866,8 +863,9 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
}
write!(f, "{} {{", bare_fn.sig.0)?;
parameterized(f, substs, def_id, Ns::Value, &[],
|tcx| tcx.lookup_item_type(def_id).generics)?;
parameterized(
f, substs, def_id, Ns::Value, &[],
|tcx| tcx.opt_lookup_item_type(def_id).map(|t| t.generics))?;
write!(f, "}}")
}
TyFnPtr(ref bare_fn) => {
......@@ -890,8 +888,12 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
!tcx.tcache.borrow().contains_key(&def.did) {
write!(f, "{}<..>", tcx.item_path_str(def.did))
} else {
parameterized(f, substs, def.did, Ns::Type, &[],
|tcx| tcx.lookup_item_type(def.did).generics)
parameterized(
f, substs, def.did, Ns::Type, &[],
|tcx| {
tcx.opt_lookup_item_type(def.did).
map(|t| t.generics)
})
}
})
}
......
......@@ -200,6 +200,12 @@ pub fn mir(&self) -> &'a Mir<'tcx> { self.mir }
pub struct DataflowResults<O>(DataflowState<O>) where O: BitDenotation;
impl<O: BitDenotation> DataflowResults<O> {
pub fn sets(&self) -> &AllSets<O::Idx> {
&self.0.sets
}
}
// FIXME: This type shouldn't be public, but the graphviz::MirWithFlowState trait
// references it in a method signature. Look into using `pub(crate)` to address this.
pub struct DataflowState<O: BitDenotation>
......@@ -444,10 +450,17 @@ fn propagate_bits_into_graph_successors_of(
repr::TerminatorKind::Return |
repr::TerminatorKind::Resume => {}
repr::TerminatorKind::Goto { ref target } |
repr::TerminatorKind::Drop { ref target, value: _, unwind: None } => {
repr::TerminatorKind::Drop { ref target, location: _, unwind: None } |
repr::TerminatorKind::DropAndReplace {
ref target, value: _, location: _, unwind: None
} => {
self.propagate_bits_into_entry_set_for(in_out, changed, target);
}
repr::TerminatorKind::Drop { ref target, value: _, unwind: Some(ref unwind) } => {
repr::TerminatorKind::Drop { ref target, location: _, unwind: Some(ref unwind) } |
repr::TerminatorKind::DropAndReplace {
ref target, value: _, location: _, unwind: Some(ref unwind)
} => {
self.propagate_bits_into_entry_set_for(in_out, changed, target);
self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
}
......
此差异已折叠。
......@@ -671,10 +671,18 @@ fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> MoveD
let _ = discr;
}
TerminatorKind::Drop { value: ref lval, target: _, unwind: _ } => {
TerminatorKind::Drop { ref location, target: _, unwind: _ } => {
let source = Location { block: bb,
index: bb_data.statements.len() };
bb_ctxt.on_move_out_lval(SK::Drop, lval, source);
bb_ctxt.on_move_out_lval(SK::Drop, location, source);
}
TerminatorKind::DropAndReplace { ref location, ref value, .. } => {
let assigned_path = bb_ctxt.builder.move_path_for(location);
bb_ctxt.path_map.fill_to(assigned_path.idx());
let source = Location { block: bb,
index: bb_data.statements.len() };
bb_ctxt.on_operand(SK::Use, value, source);
}
TerminatorKind::Call { ref func, ref args, ref destination, cleanup: _ } => {
let source = Location { block: bb,
......
......@@ -24,8 +24,10 @@
use rustc::ty::{self, TyCtxt};
mod abs_domain;
pub mod elaborate_drops;
mod dataflow;
mod gather_moves;
mod patch;
// mod graphviz;
use self::dataflow::{BitDenotation};
......@@ -34,7 +36,7 @@
use self::dataflow::{MaybeInitializedLvals, MaybeUninitializedLvals};
use self::dataflow::{DefinitelyInitializedLvals};
use self::gather_moves::{MoveData, MovePathIndex, Location};
use self::gather_moves::{MovePathContent};
use self::gather_moves::{MovePathContent, MovePathData};
fn has_rustc_mir_with(attrs: &[ast::Attribute], name: &str) -> Option<P<MetaItem>> {
for attr in attrs {
......@@ -202,6 +204,37 @@ enum DropFlagState {
Absent, // i.e. deinitialized or "moved"
}
impl DropFlagState {
    /// Encode this state as the runtime drop-flag value: `true` when the
    /// value is initialized (`Present`), `false` when it has been
    /// deinitialized or moved out (`Absent`).
    fn value(self) -> bool {
        if let DropFlagState::Present = self {
            true
        } else {
            false
        }
    }
}
/// Walks the children of the move path `path` and returns the first
/// child whose lvalue is a projection accepted by `cond`, or `None`
/// if no child matches.
fn move_path_children_matching<'tcx, F>(move_paths: &MovePathData<'tcx>,
                                        path: MovePathIndex,
                                        mut cond: F)
                                        -> Option<MovePathIndex>
    where F: FnMut(&repr::LvalueProjection<'tcx>) -> bool
{
    // Children form a singly-linked list via `first_child`/`next_sibling`.
    let mut cursor = move_paths[path].first_child;
    while let Some(child) = cursor {
        if let MovePathContent::Lvalue(repr::Lvalue::Projection(ref proj)) =
                move_paths[child].content {
            if cond(proj) {
                return Some(child);
            }
        }
        cursor = move_paths[child].next_sibling;
    }
    None
}
fn on_all_children_bits<'a, 'tcx, F>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
mir: &Mir<'tcx>,
......@@ -309,15 +342,23 @@ fn drop_flag_effects_for_location<'a, 'tcx, F>(
Some(stmt) => match stmt.kind {
repr::StatementKind::Assign(ref lvalue, _) => {
debug!("drop_flag_effects: assignment {:?}", stmt);
on_all_children_bits(tcx, mir, move_data,
on_all_children_bits(tcx, mir, move_data,
move_data.rev_lookup.find(lvalue),
|moi| callback(moi, DropFlagState::Present))
}
},
None => {
// terminator - no move-ins except for function return edge
let term = bb.terminator();
debug!("drop_flag_effects: terminator {:?}", term);
debug!("drop_flag_effects: replace {:?}", bb.terminator());
match bb.terminator().kind {
repr::TerminatorKind::DropAndReplace { ref location, .. } => {
on_all_children_bits(tcx, mir, move_data,
move_data.rev_lookup.find(location),
|moi| callback(moi, DropFlagState::Present))
}
_ => {
// other terminators do not contain move-ins
}
}
}
}
}
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::gather_moves::Location;
use rustc::ty::Ty;
use rustc::mir::repr::*;
use syntax::codemap::Span;
use std::iter;
use std::u32;
/// This struct represents a patch to MIR, which can add
/// new statements and basic blocks and patch over block
/// terminators.
pub struct MirPatch<'tcx> {
// Replacement terminator for each pre-existing block (`None` = unpatched).
patch_map: Vec<Option<TerminatorKind<'tcx>>>,
// Freshly created blocks; appended after the original blocks on `apply`.
new_blocks: Vec<BasicBlockData<'tcx>>,
// Statements to insert, keyed by the location they are inserted at.
new_statements: Vec<(Location, StatementKind<'tcx>)>,
// Declarations for temporaries allocated via `new_temp`.
new_temps: Vec<TempDecl<'tcx>>,
// A block ending in `Resume`; guaranteed to exist after `MirPatch::new`.
resume_block: BasicBlock,
// Index of the next temp to hand out (starts past the MIR's own temps).
next_temp: u32,
}
impl<'tcx> MirPatch<'tcx> {
/// Creates an empty patch for `mir`, ensuring `resume_block` points at a
/// statement-free block that ends in `TerminatorKind::Resume` (reusing an
/// existing one when possible, otherwise creating a new block).
pub fn new(mir: &Mir<'tcx>) -> Self {
let mut result = MirPatch {
patch_map: iter::repeat(None)
.take(mir.basic_blocks.len()).collect(),
new_blocks: vec![],
new_temps: vec![],
new_statements: vec![],
next_temp: mir.temp_decls.len() as u32,
resume_block: START_BLOCK
};
// make sure the MIR we create has a resume block. It is
// completely legal to convert jumps to the resume block
// to jumps to None, but we occasionally have to add
// instructions just before that.
let mut resume_block = None;
let mut resume_stmt_block = None;
for block in mir.all_basic_blocks() {
let data = mir.basic_block_data(block);
if let TerminatorKind::Resume = data.terminator().kind {
// A Resume block with statements can't be reused directly; we
// redirect it to the (statement-free) resume block below.
if data.statements.len() > 0 {
resume_stmt_block = Some(block);
} else {
resume_block = Some(block);
}
break
}
}
let resume_block = resume_block.unwrap_or_else(|| {
result.new_block(BasicBlockData {
statements: vec![],
terminator: Some(Terminator {
span: mir.span,
scope: ScopeId::new(0),
kind: TerminatorKind::Resume
}),
is_cleanup: true
})});
result.resume_block = resume_block;
if let Some(resume_stmt_block) = resume_stmt_block {
result.patch_terminator(resume_stmt_block, TerminatorKind::Goto {
target: resume_block
});
}
result
}
/// Returns the canonical statement-free resume block for this patch.
pub fn resume_block(&self) -> BasicBlock {
self.resume_block
}
/// Returns true if `bb`'s terminator has already been replaced.
pub fn is_patched(&self, bb: BasicBlock) -> bool {
self.patch_map[bb.index()].is_some()
}
/// Returns the location just past the last statement of `bb` (i.e. the
/// terminator's location), handling both original and newly added blocks.
pub fn terminator_loc(&self, mir: &Mir<'tcx>, bb: BasicBlock) -> Location {
// New blocks have indices past the end of `mir.basic_blocks`, so a
// successful checked_sub yields an index into `self.new_blocks`.
let offset = match bb.index().checked_sub(mir.basic_blocks.len()) {
Some(index) => self.new_blocks[index].statements.len(),
None => mir.basic_block_data(bb).statements.len()
};
Location {
block: bb,
index: offset
}
}
/// Allocates a fresh temporary of type `ty`, returning its index.
pub fn new_temp(&mut self, ty: Ty<'tcx>) -> u32 {
let index = self.next_temp;
assert!(self.next_temp < u32::MAX);
self.next_temp += 1;
self.new_temps.push(TempDecl { ty: ty });
index
}
/// Adds a new basic block to the patch, returning the index it will have
/// once the patch is applied.
pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
let block = BasicBlock::new(self.patch_map.len());
debug!("MirPatch: new_block: {:?}: {:?}", block, data);
self.new_blocks.push(data);
self.patch_map.push(None);
block
}
/// Replaces `block`'s terminator kind with `new` when the patch is
/// applied. Each block may only be patched once.
pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) {
assert!(self.patch_map[block.index()].is_none());
debug!("MirPatch: patch_terminator({:?}, {:?})", block, new);
self.patch_map[block.index()] = Some(new);
}
/// Queues `stmt` for insertion at `loc` when the patch is applied.
pub fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) {
debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt);
self.new_statements.push((loc, stmt));
}
/// Convenience wrapper: queues an `Assign(lv, rv)` statement at `loc`.
pub fn add_assign(&mut self, loc: Location, lv: Lvalue<'tcx>, rv: Rvalue<'tcx>) {
self.add_statement(loc, StatementKind::Assign(lv, rv));
}
/// Applies all queued changes to `mir`: appends new blocks and temps,
/// swaps in patched terminators, then inserts the queued statements.
pub fn apply(self, mir: &mut Mir<'tcx>) {
debug!("MirPatch: {:?} new temps, starting from index {}: {:?}",
self.new_temps.len(), mir.temp_decls.len(), self.new_temps);
debug!("MirPatch: {} new blocks, starting from index {}",
self.new_blocks.len(), mir.basic_blocks.len());
mir.basic_blocks.extend(self.new_blocks);
mir.temp_decls.extend(self.new_temps);
for (src, patch) in self.patch_map.into_iter().enumerate() {
if let Some(patch) = patch {
debug!("MirPatch: patching block {:?}", src);
mir.basic_blocks[src].terminator_mut().kind = patch;
}
}
// Insert statements in sorted order; `delta` tracks how many have
// already been inserted into the current block, since each insertion
// shifts the indices of the statements after it.
let mut new_statements = self.new_statements;
new_statements.sort_by(|u,v| u.0.cmp(&v.0));
let mut delta = 0;
let mut last_bb = START_BLOCK;
for (mut loc, stmt) in new_statements {
if loc.block != last_bb {
delta = 0;
last_bb = loc.block;
}
debug!("MirPatch: adding statement {:?} at loc {:?}+{}",
stmt, loc, delta);
loc.index += delta;
let (span, scope) = Self::context_for_index(
mir.basic_block_data(loc.block), loc
);
mir.basic_block_data_mut(loc.block).statements.insert(
loc.index, Statement {
span: span,
scope: scope,
kind: stmt
});
delta += 1;
}
}
/// Returns the span/scope of the statement at `loc`, falling back to the
/// block's terminator when `loc` is past the last statement.
pub fn context_for_index(data: &BasicBlockData, loc: Location) -> (Span, ScopeId) {
match data.statements.get(loc.index) {
Some(stmt) => (stmt.span, stmt.scope),
None => (data.terminator().span, data.terminator().scope)
}
}
/// Like `context_for_index`, but resolves `loc.block` against either the
/// original MIR or this patch's newly added blocks.
pub fn context_for_location(&self, mir: &Mir, loc: Location) -> (Span, ScopeId) {
let data = match loc.block.index().checked_sub(mir.basic_blocks.len()) {
Some(new) => &self.new_blocks[new],
None => mir.basic_block_data(loc.block)
};
Self::context_for_index(data, loc)
}
}
......@@ -18,6 +18,8 @@
pub use self::AliasableViolationKind::*;
pub use self::MovedValueUseKind::*;
pub use self::mir::elaborate_drops::ElaborateDrops;
use self::InteriorKind::*;
use rustc::dep_graph::DepNode;
......
......@@ -39,7 +39,7 @@
pub use borrowck::check_crate;
pub use borrowck::build_borrowck_dataflow_data_for_fn;
pub use borrowck::{AnalysisData, BorrowckCtxt};
pub use borrowck::{AnalysisData, BorrowckCtxt, ElaborateDrops};
// NB: This module needs to be declared first so diagnostics are
// registered before they are used.
......
......@@ -1037,7 +1037,12 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads);
passes.push_pass(box mir::transform::remove_dead_blocks::RemoveDeadBlocks);
passes.push_pass(box mir::transform::erase_regions::EraseRegions);
passes.push_pass(box mir::transform::break_cleanup_edges::BreakCleanupEdges);
passes.push_pass(box mir::transform::add_call_guards::AddCallGuards);
passes.push_pass(box borrowck::ElaborateDrops);
passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads);
passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg);
passes.push_pass(box mir::transform::add_call_guards::AddCallGuards);
passes.push_pass(box mir::transform::dump_mir::DumpMir("pre_trans"));
passes.run_passes(tcx, &mut mir_map);
});
......
......@@ -142,6 +142,11 @@ fn item_name(&self, def: DefId) -> ast::Name {
decoder::get_item_name(&self.intr, &cdata, def.index)
}
/// Looks up the name of `def` in crate metadata, returning `None` when
/// the metadata record has no name (unlike `item_name`, which panics).
fn opt_item_name(&self, def: DefId) -> Option<ast::Name> {
// Record the dep-graph read before touching the crate metadata.
self.dep_graph.read(DepNode::MetaData(def));
let cdata = self.get_crate_data(def.krate);
decoder::maybe_get_item_name(&self.intr, &cdata, def.index)
}
fn inherent_implementations_for_type(&self, def_id: DefId) -> Vec<DefId>
{
......
......@@ -285,12 +285,17 @@ fn item_trait_ref<'a, 'tcx>(doc: rbml::Doc, tcx: TyCtxt<'a, 'tcx, 'tcx>, cdata:
}
fn item_name(intr: &IdentInterner, item: rbml::Doc) -> ast::Name {
let name = reader::get_doc(item, tag_paths_data_name);
let string = name.as_str_slice();
match intr.find(string) {
None => token::intern(string),
Some(val) => val,
}
maybe_item_name(intr, item).expect("no item in item_name")
}
/// Decodes the item's name from its metadata doc, if present.
/// Uses the already-interned name when the interner knows the string,
/// otherwise interns it fresh.
fn maybe_item_name(intr: &IdentInterner, item: rbml::Doc) -> Option<ast::Name> {
    let name_doc = match reader::maybe_get_doc(item, tag_paths_data_name) {
        Some(doc) => doc,
        None => return None,
    };
    let string = name_doc.as_str_slice();
    Some(intr.find(string).unwrap_or_else(|| token::intern(string)))
}
fn family_to_variant_kind<'tcx>(family: Family) -> Option<ty::VariantKind> {
......@@ -792,6 +797,11 @@ pub fn get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex) -> ast::Nam
item_name(intr, cdata.lookup_item(id))
}
/// Public entry point: looks up the metadata item `id` in `cdata` and
/// decodes its name, returning `None` when the record has no name.
pub fn maybe_get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex)
-> Option<ast::Name> {
maybe_item_name(intr, cdata.lookup_item(id))
}
pub fn maybe_get_item_ast<'a, 'tcx>(cdata: Cmd, tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefIndex)
-> FoundAst<'tcx> {
debug!("Looking up item: {:?}", id);
......
......@@ -34,29 +34,25 @@ pub fn stmt_expr(&mut self, mut block: BasicBlock, expr: Expr<'tcx>) -> BlockAnd
let scope_id = this.innermost_scope_id();
let lhs_span = lhs.span;
let lhs_ty = lhs.ty;
let rhs_ty = rhs.ty;
let lhs_needs_drop = this.hir.needs_drop(lhs_ty);
let rhs_needs_drop = this.hir.needs_drop(rhs_ty);
// Note: we evaluate assignments right-to-left. This
// is better for borrowck interaction with overloaded
// operators like x[j] = x[i].
// Generate better code for things that don't need to be
// dropped.
let rhs = if lhs_needs_drop || rhs_needs_drop {
let op = unpack!(block = this.as_operand(block, rhs));
Rvalue::Use(op)
if this.hir.needs_drop(lhs.ty) {
let rhs = unpack!(block = this.as_operand(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
unpack!(block = this.build_drop_and_replace(
block, lhs_span, lhs, rhs
));
block.unit()
} else {
unpack!(block = this.as_rvalue(block, rhs))
};
let lhs = unpack!(block = this.as_lvalue(block, lhs));
unpack!(block = this.build_drop(block, lhs_span, lhs.clone(), lhs_ty));
this.cfg.push_assign(block, scope_id, expr_span, &lhs, rhs);
block.unit()
let rhs = unpack!(block = this.as_rvalue(block, rhs));
let lhs = unpack!(block = this.as_lvalue(block, lhs));
this.cfg.push_assign(block, scope_id, expr_span, &lhs, rhs);
block.unit()
}
}
ExprKind::AssignOp { op, lhs, rhs } => {
// FIXME(#28160) there is an interesting semantics
......
......@@ -139,7 +139,7 @@ struct DropData<'tcx> {
span: Span,
/// lvalue to drop
value: Lvalue<'tcx>,
location: Lvalue<'tcx>,
/// The cached block for the cleanups-on-diverge path. This block
/// contains code to run the current drop and all the preceding
......@@ -402,7 +402,7 @@ pub fn schedule_drop(&mut self,
// the drop that comes before it in the vector.
scope.drops.push(DropData {
span: span,
value: lvalue.clone(),
location: lvalue.clone(),
cached_block: None
});
return;
......@@ -497,7 +497,7 @@ pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
pub fn build_drop(&mut self,
block: BasicBlock,
span: Span,
value: Lvalue<'tcx>,
location: Lvalue<'tcx>,
ty: Ty<'tcx>) -> BlockAnd<()> {
if !self.hir.needs_drop(ty) {
return block.unit();
......@@ -509,7 +509,7 @@ pub fn build_drop(&mut self,
scope_id,
span,
TerminatorKind::Drop {
value: value,
location: location,
target: next_target,
unwind: diverge_target,
});
......@@ -517,6 +517,27 @@ pub fn build_drop(&mut self,
}
/// Terminates `block` with a `DropAndReplace` of `location` by `value`:
/// the old contents of `location` are dropped and `value` is written in
/// its place. Returns the fresh block that execution continues in on the
/// non-unwind path.
pub fn build_drop_and_replace(&mut self,
                              block: BasicBlock,
                              span: Span,
                              location: Lvalue<'tcx>,
                              value: Operand<'tcx>) -> BlockAnd<()> {
    let scope_id = self.innermost_scope_id();
    let next_target = self.cfg.start_new_block();
    // Unwind edge goes to the cleanup chain, if any diverge path exists.
    let diverge_target = self.diverge_cleanup();
    let kind = TerminatorKind::DropAndReplace {
        location: location,
        value: value,
        target: next_target,
        unwind: diverge_target,
    };
    self.cfg.terminate(block, scope_id, span, kind);
    next_target.unit()
}
// Panicking
// =========
// FIXME: should be moved into their own module
......@@ -653,7 +674,7 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
});
let next = cfg.start_new_block();
cfg.terminate(block, scope.id, drop_data.span, TerminatorKind::Drop {
value: drop_data.value.clone(),
location: drop_data.location.clone(),
target: next,
unwind: on_diverge
});
......@@ -709,7 +730,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
scope.id,
drop_data.span,
TerminatorKind::Drop {
value: drop_data.value.clone(),
location: drop_data.location.clone(),
target: target,
unwind: None
});
......
......@@ -12,13 +12,11 @@
use rustc::mir::repr::*;
use rustc::mir::transform::{MirPass, MirSource, Pass};
use rustc_data_structures::bitvec::BitVector;
use pretty;
use traversal;
pub struct BreakCleanupEdges;
pub struct AddCallGuards;
/**
* Breaks outgoing critical edges for call terminators in the MIR.
......@@ -40,7 +38,7 @@
*
*/
impl<'tcx> MirPass<'tcx> for BreakCleanupEdges {
impl<'tcx> MirPass<'tcx> for AddCallGuards {
fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mut Mir<'tcx>) {
let mut pred_count = vec![0u32; mir.basic_blocks.len()];
......@@ -53,9 +51,6 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mu
}
}
let cleanup_map : BitVector = mir.basic_blocks
.iter().map(|bb| bb.is_cleanup).collect();
// We need a place to store the new blocks generated
let mut new_blocks = Vec::new();
......@@ -65,30 +60,31 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mu
for &bb in &bbs {
let data = mir.basic_block_data_mut(bb);
if let Some(ref mut term) = data.terminator {
if term_is_invoke(term) {
let term_span = term.span;
let term_scope = term.scope;
let succs = term.successors_mut();
for tgt in succs {
let num_preds = pred_count[tgt.index()];
if num_preds > 1 {
// It's a critical edge, break it
let goto = Terminator {
span: term_span,
scope: term_scope,
kind: TerminatorKind::Goto { target: *tgt }
};
let mut data = BasicBlockData::new(Some(goto));
data.is_cleanup = cleanup_map.contains(tgt.index());
// Get the index it will be when inserted into the MIR
let idx = cur_len + new_blocks.len();
new_blocks.push(data);
*tgt = BasicBlock::new(idx);
}
}
match data.terminator {
Some(Terminator {
kind: TerminatorKind::Call {
destination: Some((_, ref mut destination)),
cleanup: Some(_),
..
}, span, scope
}) if pred_count[destination.index()] > 1 => {
// It's a critical edge, break it
let call_guard = BasicBlockData {
statements: vec![],
is_cleanup: data.is_cleanup,
terminator: Some(Terminator {
span: span,
scope: scope,
kind: TerminatorKind::Goto { target: *destination }
})
};
// Get the index it will be when inserted into the MIR
let idx = cur_len + new_blocks.len();
new_blocks.push(call_guard);
*destination = BasicBlock::new(idx);
}
_ => {}
}
}
......@@ -99,13 +95,4 @@ fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mu
}
}
impl Pass for BreakCleanupEdges {}
// Returns true if the terminator is a call that would use an invoke in LLVM.
fn term_is_invoke(term: &Terminator) -> bool {
match term.kind {
TerminatorKind::Call { cleanup: Some(_), .. } |
TerminatorKind::Drop { unwind: Some(_), .. } => true,
_ => false
}
}
impl Pass for AddCallGuards {}
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
......@@ -8,20 +8,20 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::ValueRef;
use rustc::ty::Ty;
use adt;
use base;
use common::{self, BlockAndBuilder};
use machine;
use type_of;
use type_::Type;
//! This pass just dumps MIR at a specified point.
pub fn drop_fill<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, value: ValueRef, ty: Ty<'tcx>) {
let llty = type_of::type_of(bcx.ccx(), ty);
let llptr = bcx.pointercast(value, Type::i8(bcx.ccx()).ptr_to());
let filling = common::C_u8(bcx.ccx(), adt::DTOR_DONE);
let size = machine::llsize_of(bcx.ccx(), llty);
let align = common::C_u32(bcx.ccx(), machine::llalign_of_min(bcx.ccx(), llty));
base::call_memset(&bcx, llptr, filling, size, align, false);
use rustc::ty::TyCtxt;
use rustc::mir::repr::*;
use rustc::mir::transform::{Pass, MirPass, MirSource};
use pretty;
// A MIR pass that only dumps the MIR (via `pretty::dump_mir`) under the
// label carried in its single field; it performs no transformation.
pub struct DumpMir<'a>(pub &'a str);
impl<'b, 'tcx> MirPass<'tcx> for DumpMir<'b> {
fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>,
src: MirSource, mir: &mut Mir<'tcx>) {
// `self.0` is the pass label (e.g. "pre_trans") used in the dump output.
pretty::dump_mir(tcx, self.0, &0, src, mir, None);
}
}
impl<'b> Pass for DumpMir<'b> {}
......@@ -13,6 +13,7 @@
pub mod erase_regions;
pub mod no_landing_pads;
pub mod type_check;
pub mod break_cleanup_edges;
pub mod add_call_guards;
pub mod promote_consts;
pub mod qualify_consts;
pub mod dump_mir;
......@@ -29,12 +29,11 @@ fn visit_terminator(&mut self, bb: BasicBlock, terminator: &mut Terminator<'tcx>
TerminatorKind::SwitchInt { .. } => {
/* nothing to do */
},
TerminatorKind::Call { cleanup: ref mut unwind, .. } |
TerminatorKind::DropAndReplace { ref mut unwind, .. } |
TerminatorKind::Drop { ref mut unwind, .. } => {
unwind.take();
},
TerminatorKind::Call { ref mut cleanup, .. } => {
cleanup.take();
},
}
self.super_terminator(bb, terminator);
}
......
......@@ -399,7 +399,7 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>,
});
let terminator = block.terminator_mut();
match terminator.kind {
TerminatorKind::Drop { value: Lvalue::Temp(index), target, .. } => {
TerminatorKind::Drop { location: Lvalue::Temp(index), target, .. } => {
if promoted(index) {
terminator.kind = TerminatorKind::Goto {
target: target
......
......@@ -422,6 +422,7 @@ fn qualify_const(&mut self) -> Qualif {
TerminatorKind::Switch {..} |
TerminatorKind::SwitchInt {..} |
TerminatorKind::DropAndReplace { .. } |
TerminatorKind::Resume => None,
TerminatorKind::Return => {
......
......@@ -363,6 +363,20 @@ fn check_terminator(&mut self,
// no checks needed for these
}
TerminatorKind::DropAndReplace {
ref location,
ref value,
..
} => {
let lv_ty = mir.lvalue_ty(tcx, location).to_ty(tcx);
let rv_ty = mir.operand_ty(tcx, value);
if let Err(terr) = self.sub_types(self.last_span, rv_ty, lv_ty) {
span_mirbug!(self, term, "bad DropAndReplace ({:?} = {:?}): {:?}",
lv_ty, rv_ty, terr);
}
}
TerminatorKind::If { ref cond, .. } => {
let cond_ty = mir.operand_ty(tcx, cond);
match cond_ty.sty {
......@@ -519,6 +533,69 @@ fn check_box_free_inputs(&self,
}
}
/// Verify the cleanup-block invariants of `block`'s terminator:
/// normal edges must stay among blocks with the same `is_cleanup`
/// flag; `unwind`/`cleanup` edges must lead into cleanup blocks and
/// may only originate from non-cleanup blocks; `Resume` may only
/// appear in cleanup blocks and `Return` only outside of them.
fn check_iscleanup(&mut self, mir: &Mir<'tcx>, block: &BasicBlockData<'tcx>)
{
    let is_cleanup = block.is_cleanup;
    self.last_span = block.terminator().span;
    match block.terminator().kind {
        TerminatorKind::Goto { target } =>
            self.assert_iscleanup(mir, block, target, is_cleanup),
        TerminatorKind::If { targets: (on_true, on_false), .. } => {
            // Both branches stay in the same (cleanup / non-cleanup) world.
            self.assert_iscleanup(mir, block, on_true, is_cleanup);
            self.assert_iscleanup(mir, block, on_false, is_cleanup);
        }
        TerminatorKind::Switch { ref targets, .. } |
        TerminatorKind::SwitchInt { ref targets, .. } => {
            for target in targets {
                self.assert_iscleanup(mir, block, *target, is_cleanup);
            }
        }
        TerminatorKind::Resume => {
            if !is_cleanup {
                span_mirbug!(self, block, "resume on non-cleanup block!")
            }
        }
        TerminatorKind::Return => {
            if is_cleanup {
                span_mirbug!(self, block, "return on cleanup block")
            }
        }
        TerminatorKind::Drop { target, unwind, .. } |
        TerminatorKind::DropAndReplace { target, unwind, .. } => {
            self.assert_iscleanup(mir, block, target, is_cleanup);
            if let Some(unwind) = unwind {
                // An unwind edge may not start from a cleanup block,
                // and its target must itself be a cleanup block.
                if is_cleanup {
                    span_mirbug!(self, block, "unwind on cleanup block")
                }
                self.assert_iscleanup(mir, block, unwind, true);
            }
        }
        TerminatorKind::Call { ref destination, cleanup, .. } => {
            if let &Some((_, target)) = destination {
                self.assert_iscleanup(mir, block, target, is_cleanup);
            }
            if let Some(cleanup) = cleanup {
                // Same rule as Drop's unwind edge.
                if is_cleanup {
                    span_mirbug!(self, block, "cleanup on cleanup block")
                }
                self.assert_iscleanup(mir, block, cleanup, true);
            }
        }
    }
}
/// Report a MIR-typeck error unless `bb`'s `is_cleanup` flag matches
/// the expected `iscleanuppad` value for the edge described by `ctxt`.
fn assert_iscleanup(&mut self,
                    mir: &Mir<'tcx>,
                    ctxt: &fmt::Debug,
                    bb: BasicBlock,
                    iscleanuppad: bool)
{
    let actual = mir.basic_block_data(bb).is_cleanup;
    if actual == iscleanuppad {
        return;
    }
    span_mirbug!(self, ctxt, "cleanuppad mismatch: {:?} should be {:?}",
                 bb, iscleanuppad);
}
fn typeck_mir(&mut self, mir: &Mir<'tcx>) {
self.last_span = mir.span;
debug!("run_on_mir: {:?}", mir.span);
......@@ -530,9 +607,8 @@ fn typeck_mir(&mut self, mir: &Mir<'tcx>) {
self.check_stmt(mir, stmt);
}
if let Some(ref terminator) = block.terminator {
self.check_terminator(mir, terminator);
}
self.check_terminator(mir, block.terminator());
self.check_iscleanup(mir, block);
}
}
......
......@@ -577,6 +577,15 @@ pub fn lpad(&self) -> Option<&'blk LandingPad> {
self.lpad.get()
}
/// Overwrite this block's cached landing-pad state with an
/// already-arena-allocated `LandingPad` reference (or clear it with `None`).
pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
    // FIXME: use an IVar?
    self.lpad.set(lpad);
}
/// Store a fresh `LandingPad` for this block, moving it into the
/// function context's `lpad_arena` so the reference lives long enough.
pub fn set_lpad(&self, lpad: Option<LandingPad>) {
    self.set_lpad_ref(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
}
pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
self.fcx.mir()
}
......@@ -716,7 +725,16 @@ pub fn monomorphize<T>(&self, value: &T) -> T
}
pub fn set_lpad(&self, lpad: Option<LandingPad>) {
self.bcx.lpad.set(lpad.map(|p| &*self.fcx().lpad_arena.alloc(p)))
self.bcx.set_lpad(lpad)
}
/// Delegate to the underlying block's `set_lpad_ref`.
pub fn set_lpad_ref(&self, lpad: Option<&'blk LandingPad>) {
    // FIXME: use an IVar?
    self.bcx.set_lpad_ref(lpad);
}
/// The landing-pad state currently associated with the underlying block.
pub fn lpad(&self) -> Option<&'blk LandingPad> {
    self.bcx.lpad()
}
}
......@@ -761,6 +779,10 @@ pub fn msvc(cleanuppad: ValueRef) -> LandingPad {
/// The operand bundle to attach to calls made under this landing pad,
/// if one was recorded (used for MSVC-style funclet calls).
pub fn bundle(&self) -> Option<&OperandBundleDef> {
    self.operand.as_ref()
}
/// The raw `cleanuppad` instruction, if this landing pad was created
/// with `LandingPad::msvc`; `None` for GNU-style landing pads.
pub fn cleanuppad(&self) -> Option<ValueRef> {
    self.cleanuppad
}
}
impl Clone for LandingPad {
......
......@@ -13,7 +13,9 @@
use rustc_data_structures::bitvec::BitVector;
use rustc::mir::repr as mir;
use rustc::mir::repr::TerminatorKind;
use rustc::mir::visit::{Visitor, LvalueContext};
use rustc_mir::traversal;
use common::{self, Block, BlockAndBuilder};
use super::rvalue;
......@@ -134,3 +136,104 @@ fn visit_lvalue(&mut self,
self.super_lvalue(lvalue, context);
}
}
/// Classification of a MIR basic block for exception-handling
/// (funclet) purposes, computed by `cleanup_kinds`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CleanupKind {
    /// An ordinary block, not part of any unwind path.
    NotCleanup,
    /// A block that is the direct target of an `unwind`/`cleanup` edge
    /// (or is reachable from two different funclets) and therefore
    /// heads a funclet of its own.
    Funclet,
    /// A cleanup block reachable from a single funclet master; it is
    /// considered part of that funclet.
    Internal { funclet: mir::BasicBlock }
}
/// Compute, for every basic block of `mir`, whether it is a regular
/// block, the head ("master") of an EH funclet, or internal to one.
///
/// Two passes over the CFG:
///  1. `discover_masters`: every target of an `unwind`/`cleanup` edge
///     is marked `Funclet`.
///  2. `propagate`: walk the CFG in reverse postorder, marking blocks
///     reached from a funclet master as `Internal` to it; a block
///     reached from two *different* funclets is promoted to a
///     `Funclet` master of its own.
pub fn cleanup_kinds<'bcx,'tcx>(_bcx: Block<'bcx,'tcx>,
                                mir: &mir::Mir<'tcx>)
                                -> Vec<CleanupKind>
{
    fn discover_masters<'tcx>(result: &mut [CleanupKind], mir: &mir::Mir<'tcx>) {
        for bb in mir.all_basic_blocks() {
            let data = mir.basic_block_data(bb);
            match data.terminator().kind {
                // These terminators carry no unwind edge.
                TerminatorKind::Goto { .. } |
                TerminatorKind::Resume |
                TerminatorKind::Return |
                TerminatorKind::If { .. } |
                TerminatorKind::Switch { .. } |
                TerminatorKind::SwitchInt { .. } => {
                    /* nothing to do */
                }
                // Any unwind target becomes a funclet master.
                // (`Call`'s field is named `cleanup`; bind it as `unwind`
                // so all three arms share one body.)
                TerminatorKind::Call { cleanup: unwind, .. } |
                TerminatorKind::DropAndReplace { unwind, .. } |
                TerminatorKind::Drop { unwind, .. } => {
                    if let Some(unwind) = unwind {
                        debug!("cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
                               bb, data, unwind);
                        result[unwind.index()] = CleanupKind::Funclet;
                    }
                }
            }
        }
    }

    fn propagate<'tcx>(result: &mut [CleanupKind], mir: &mir::Mir<'tcx>) {
        // For each funclet master, the unique funclet that follows it.
        // Used only to detect (and report) a funclet that would need
        // two different parents.
        let mut funclet_succs : Vec<_> =
            mir.all_basic_blocks().iter().map(|_| None).collect();

        let mut set_successor = |funclet: mir::BasicBlock, succ| {
            match funclet_succs[funclet.index()] {
                ref mut s @ None => {
                    debug!("set_successor: updating successor of {:?} to {:?}",
                           funclet, succ);
                    *s = Some(succ);
                },
                Some(s) => if s != succ {
                    span_bug!(mir.span, "funclet {:?} has 2 parents - {:?} and {:?}",
                              funclet, s, succ);
                }
            }
        };

        // Reverse postorder guarantees a block's funclet membership is
        // known before its successors are examined.
        for (bb, data) in traversal::reverse_postorder(mir) {
            let funclet = match result[bb.index()] {
                CleanupKind::NotCleanup => continue,
                CleanupKind::Funclet => bb,
                CleanupKind::Internal { funclet } => funclet,
            };

            debug!("cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
                   bb, data, result[bb.index()], funclet);

            for &succ in data.terminator().successors().iter() {
                let kind = result[succ.index()];
                debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}",
                       funclet, succ, kind);
                match kind {
                    CleanupKind::NotCleanup => {
                        // First cleanup edge into `succ`: it joins the
                        // current funclet.
                        result[succ.index()] = CleanupKind::Internal { funclet: funclet };
                    }
                    CleanupKind::Funclet => {
                        set_successor(funclet, succ);
                    }
                    CleanupKind::Internal { funclet: succ_funclet } => {
                        if funclet != succ_funclet {
                            // `succ` has 2 different funclet going into it, so it must
                            // be a funclet by itself.
                            debug!("promoting {:?} to a funclet and updating {:?}", succ,
                                   succ_funclet);
                            result[succ.index()] = CleanupKind::Funclet;
                            set_successor(succ_funclet, succ);
                            set_successor(funclet, succ);
                        }
                    }
                }
            }
        }
    }

    // Every block starts out as a plain non-cleanup block.
    let mut result : Vec<_> =
        mir.all_basic_blocks().iter().map(|_| CleanupKind::NotCleanup).collect();

    discover_masters(&mut result, mir);
    propagate(&mut result, mir);

    debug!("cleanup_kinds: result={:?}", result);

    result
}
......@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef};
use llvm::{self, ValueRef};
use rustc::ty;
use rustc::mir::repr as mir;
use abi::{Abi, FnType, ArgType};
......@@ -16,7 +16,7 @@
use base;
use build;
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, C_undef};
use common::{self, type_is_fat_ptr, Block, BlockAndBuilder, LandingPad, C_undef};
use debuginfo::DebugLoc;
use Disr;
use machine::{llalign_of_min, llbitsize_of_real};
......@@ -26,7 +26,8 @@
use type_::Type;
use rustc_data_structures::fnv::FnvHashMap;
use super::{MirContext, TempRef, drop};
use super::{MirContext, TempRef};
use super::analyze::CleanupKind;
use super::constant::Const;
use super::lvalue::{LvalueRef, load_fat_ptr};
use super::operand::OperandRef;
......@@ -34,22 +35,62 @@
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock) {
debug!("trans_block({:?})", bb);
let mut bcx = self.bcx(bb);
let mir = self.mir.clone();
let data = mir.basic_block_data(bb);
// MSVC SEH bits
let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) {
(Some(cp), Some(cb))
} else {
(None, None)
debug!("trans_block({:?}={:?})", bb, data);
// Create the cleanup bundle, if needed.
let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad());
let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle());
let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| {
let lltarget = this.blocks[bb.index()].llbb;
if let Some(cp) = cleanup_pad {
match this.cleanup_kind(bb) {
CleanupKind::Funclet => {
// micro-optimization: generate a `ret` rather than a jump
// to a return block
bcx.cleanup_ret(cp, Some(lltarget));
}
CleanupKind::Internal { .. } => bcx.br(lltarget),
CleanupKind::NotCleanup => bug!("jump from cleanup bb to bb {:?}", bb)
}
} else {
bcx.br(lltarget);
}
};
let funclet_br = |bcx: BlockAndBuilder, llbb: BasicBlockRef| if let Some(cp) = cleanup_pad {
bcx.cleanup_ret(cp, Some(llbb));
} else {
bcx.br(llbb);
let llblock = |this: &mut Self, target: mir::BasicBlock| {
let lltarget = this.blocks[target.index()].llbb;
if let Some(cp) = cleanup_pad {
match this.cleanup_kind(target) {
CleanupKind::Funclet => {
// MSVC cross-funclet jump - need a trampoline
debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
let trampoline = this.fcx.new_block(name, None).build();
trampoline.set_personality_fn(this.fcx.eh_personality());
trampoline.cleanup_ret(cp, Some(lltarget));
trampoline.llbb()
}
CleanupKind::Internal { .. } => lltarget,
CleanupKind::NotCleanup =>
bug!("jump from cleanup bb {:?} to bb {:?}", bb, target)
}
} else {
if let (CleanupKind::NotCleanup, CleanupKind::Funclet) =
(this.cleanup_kind(bb), this.cleanup_kind(target))
{
// jump *into* cleanup - need a landing pad if GNU
this.landing_pad_to(target).llbb
} else {
lltarget
}
}
};
for statement in &data.statements {
......@@ -78,13 +119,14 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
}
mir::TerminatorKind::Goto { target } => {
funclet_br(bcx, self.llblock(target));
funclet_br(self, bcx, target);
}
mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
let cond = self.trans_operand(&bcx, cond);
let lltrue = self.llblock(true_bb);
let llfalse = self.llblock(false_bb);
let lltrue = llblock(self, true_bb);
let llfalse = llblock(self, false_bb);
bcx.cond_br(cond.immediate(), lltrue, llfalse);
}
......@@ -106,18 +148,18 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
// code. This is especially helpful in cases like an if-let on a huge enum.
// Note: This optimization is only valid for exhaustive matches.
Some((&&bb, &c)) if c > targets.len() / 2 => {
(Some(bb), self.blocks[bb.index()])
(Some(bb), llblock(self, bb))
}
// We're generating an exhaustive switch, so the else branch
// can't be hit. Branching to an unreachable instruction
// lets LLVM know this
_ => (None, self.unreachable_block())
_ => (None, self.unreachable_block().llbb)
};
let switch = bcx.switch(discr, default_blk.llbb, targets.len());
let switch = bcx.switch(discr, default_blk, targets.len());
assert_eq!(adt_def.variants.len(), targets.len());
for (adt_variant, &target) in adt_def.variants.iter().zip(targets) {
if default_bb != Some(target) {
let llbb = self.llblock(target);
let llbb = llblock(self, target);
let llval = bcx.with_block(|bcx| adt::trans_case(
bcx, &repr, Disr::from(adt_variant.disr_val)));
build::AddCase(switch, llval, llbb)
......@@ -129,10 +171,10 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
for (value, target) in values.iter().zip(targets) {
let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
let llbb = self.llblock(*target);
let llbb = llblock(self, *target);
build::AddCase(switch, val.llval, llbb)
}
}
......@@ -143,12 +185,12 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
})
}
mir::TerminatorKind::Drop { ref value, target, unwind } => {
let lvalue = self.trans_lvalue(&bcx, value);
mir::TerminatorKind::Drop { ref location, target, unwind } => {
let lvalue = self.trans_lvalue(&bcx, location);
let ty = lvalue.ty.to_ty(bcx.tcx());
// Double check for necessity to drop
if !glue::type_needs_drop(bcx.tcx(), ty) {
funclet_br(bcx, self.llblock(target));
funclet_br(self, bcx, target);
return;
}
let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
......@@ -159,24 +201,21 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
lvalue.llval
};
if let Some(unwind) = unwind {
let uwbcx = self.bcx(unwind);
let unwind = self.make_landing_pad(uwbcx);
bcx.invoke(drop_fn,
&[llvalue],
self.llblock(target),
unwind.llbb(),
cleanup_bundle.as_ref());
self.bcx(target).at_start(|bcx| {
debug_loc.apply_to_bcx(bcx);
drop::drop_fill(bcx, lvalue.llval, ty)
});
self.blocks[target.index()].llbb,
llblock(self, unwind),
cleanup_bundle);
} else {
bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref());
drop::drop_fill(&bcx, lvalue.llval, ty);
funclet_br(bcx, self.llblock(target));
bcx.call(drop_fn, &[llvalue], cleanup_bundle);
funclet_br(self, bcx, target);
}
}
mir::TerminatorKind::DropAndReplace { .. } => {
bug!("undesugared DropAndReplace in trans: {:?}", data);
}
mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.trans_operand(&bcx, func);
......@@ -211,8 +250,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
let llptr = self.trans_operand(&bcx, &args[0]).immediate();
let val = self.trans_operand(&bcx, &args[1]);
self.store_operand(&bcx, llptr, val);
self.set_operand_dropped(&bcx, &args[1]);
funclet_br(bcx, self.llblock(target));
funclet_br(self, bcx, target);
return;
}
......@@ -222,8 +260,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
this.trans_transmute(&bcx, &args[0], dest);
});
self.set_operand_dropped(&bcx, &args[0]);
funclet_br(bcx, self.llblock(target));
funclet_br(self, bcx, target);
return;
}
......@@ -328,10 +365,7 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
}
if let Some((_, target)) = *destination {
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
funclet_br(self, bcx, target);
} else {
// trans_intrinsic_call already used Unreachable.
// bcx.unreachable();
......@@ -344,28 +378,19 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
};
// Many different ways to call a function handled here
if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) {
if let &Some(cleanup) = cleanup {
let ret_bcx = if let Some((_, target)) = *destination {
self.blocks[target.index()]
} else {
self.unreachable_block()
};
let landingpad = self.make_landing_pad(cleanup);
let invokeret = bcx.invoke(fn_ptr,
&llargs,
ret_bcx.llbb,
landingpad.llbb(),
cleanup_bundle.as_ref());
llblock(self, cleanup),
cleanup_bundle);
fn_ty.apply_attrs_callsite(invokeret);
landingpad.at_start(|bcx| {
debug_loc.apply_to_bcx(bcx);
for op in args {
self.set_operand_dropped(bcx, op);
}
});
if destination.is_some() {
let ret_bcx = ret_bcx.build();
ret_bcx.at_start(|ret_bcx| {
......@@ -375,13 +400,10 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
ty: sig.output.unwrap()
};
self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
for op in args {
self.set_operand_dropped(&ret_bcx, op);
}
});
}
} else {
let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
fn_ty.apply_attrs_callsite(llret);
if let Some((_, target)) = *destination {
let op = OperandRef {
......@@ -389,12 +411,8 @@ pub fn trans_block(&mut self, bb: mir::BasicBlock) {
ty: sig.output.unwrap()
};
self.store_return(&bcx, ret_dest, fn_ty.ret, op);
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
funclet_br(self, bcx, target);
} else {
// no need to drop args, because the call never returns
bcx.unreachable();
}
}
......@@ -534,17 +552,29 @@ fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRe
}
}
/// Create a landingpad wrapper around the given Block.
/// Look up the precomputed `CleanupKind` of `bb` (see `analyze::cleanup_kinds`).
fn cleanup_kind(&self, bb: mir::BasicBlock) -> CleanupKind {
    self.cleanup_kinds[bb.index()]
}
/// Return the landingpad wrapper around the given basic block
///
/// No-op in MSVC SEH scheme.
fn make_landing_pad(&mut self,
cleanup: BlockAndBuilder<'bcx, 'tcx>)
-> BlockAndBuilder<'bcx, 'tcx>
fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx>
{
if base::wants_msvc_seh(cleanup.sess()) {
return cleanup;
if let Some(block) = self.landing_pads[target_bb.index()] {
return block;
}
if base::wants_msvc_seh(self.fcx.ccx.sess()) {
return self.blocks[target_bb.index()];
}
let bcx = self.fcx.new_block("cleanup", None).build();
let target = self.bcx(target_bb);
let block = self.fcx.new_block("cleanup", None);
self.landing_pads[target_bb.index()] = Some(block);
let bcx = block.build();
let ccx = bcx.ccx();
let llpersonality = self.fcx.eh_personality();
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
......@@ -552,36 +582,34 @@ fn make_landing_pad(&mut self,
bcx.set_cleanup(llretval);
let slot = self.get_personality_slot(&bcx);
bcx.store(llretval, slot);
bcx.br(cleanup.llbb());
bcx
bcx.br(target.llbb());
block
}
/// Create prologue cleanuppad instruction under MSVC SEH handling scheme.
///
/// Also handles setting some state for the original trans and creating an operand bundle for
/// function calls.
fn make_cleanup_pad(&mut self, bb: mir::BasicBlock) -> Option<(ValueRef, OperandBundleDef)> {
pub fn init_cpad(&mut self, bb: mir::BasicBlock) {
let bcx = self.bcx(bb);
let data = self.mir.basic_block_data(bb);
let use_funclets = base::wants_msvc_seh(bcx.sess()) && data.is_cleanup;
let cleanup_pad = if use_funclets {
bcx.set_personality_fn(self.fcx.eh_personality());
bcx.at_start(|bcx| {
DebugLoc::None.apply_to_bcx(bcx);
Some(bcx.cleanup_pad(None, &[]))
})
} else {
None
debug!("init_cpad({:?})", data);
match self.cleanup_kinds[bb.index()] {
CleanupKind::NotCleanup => {
bcx.set_lpad(None)
}
_ if !base::wants_msvc_seh(bcx.sess()) => {
bcx.set_lpad(Some(LandingPad::gnu()))
}
CleanupKind::Internal { funclet } => {
// FIXME: is this needed?
bcx.set_personality_fn(self.fcx.eh_personality());
bcx.set_lpad_ref(self.bcx(funclet).lpad());
}
CleanupKind::Funclet => {
bcx.set_personality_fn(self.fcx.eh_personality());
DebugLoc::None.apply_to_bcx(&bcx);
let cleanup_pad = bcx.cleanup_pad(None, &[]);
bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad)));
}
};
// Set the landingpad global-state for old translator, so it knows about the SEH used.
bcx.set_lpad(if let Some(cleanup_pad) = cleanup_pad {
Some(common::LandingPad::msvc(cleanup_pad))
} else if data.is_cleanup {
Some(common::LandingPad::gnu())
} else {
None
});
cleanup_pad.map(|f| (f, OperandBundleDef::new("funclet", &[f])))
}
fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
......@@ -597,10 +625,6 @@ fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
self.blocks[bb.index()].build()
}
pub fn llblock(&self, bb: mir::BasicBlock) -> BasicBlockRef {
self.blocks[bb.index()].llbb
}
fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
......
......@@ -20,7 +20,6 @@
use consts;
use machine;
use type_of::type_of;
use mir::drop;
use Disr;
use std::ptr;
......@@ -51,9 +50,6 @@ pub fn alloca<'bcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
{
assert!(!ty.has_erasable_regions());
let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
if bcx.fcx().type_needs_drop(ty) {
drop::drop_fill(bcx, lltemp, ty);
}
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
}
......
......@@ -73,6 +73,13 @@ pub struct MirContext<'bcx, 'tcx:'bcx> {
/// A `Block` for each MIR `BasicBlock`
blocks: Vec<Block<'bcx, 'tcx>>,
/// The funclet status of each basic block
cleanup_kinds: Vec<analyze::CleanupKind>,
/// This stores the landing-pad block for a given BB, computed lazily on GNU
/// and eagerly on MSVC.
landing_pads: Vec<Option<Block<'bcx, 'tcx>>>,
/// Cached unreachable block
unreachable_block: Option<Block<'bcx, 'tcx>>,
......@@ -139,8 +146,9 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
// Analyze the temps to determine which must be lvalues
// FIXME
let lvalue_temps = bcx.with_block(|bcx| {
analyze::lvalue_temps(bcx, &mir)
let (lvalue_temps, cleanup_kinds) = bcx.with_block(|bcx| {
(analyze::lvalue_temps(bcx, &mir),
analyze::cleanup_kinds(bcx, &mir))
});
// Compute debuginfo scopes from MIR scopes.
......@@ -206,6 +214,8 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
llpersonalityslot: None,
blocks: block_bcxs,
unreachable_block: None,
cleanup_kinds: cleanup_kinds,
landing_pads: mir_blocks.iter().map(|_| None).collect(),
vars: vars,
temps: temps,
args: args,
......@@ -214,7 +224,14 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let mut visited = BitVector::new(mir_blocks.len());
let rpo = traversal::reverse_postorder(&mir);
let mut rpo = traversal::reverse_postorder(&mir);
// Prepare each block for translation.
for (bb, _) in rpo.by_ref() {
mircx.init_cpad(bb);
}
rpo.reset();
// Translate the body of each block using reverse postorder
for (bb, _) in rpo {
visited.insert(bb.index());
......@@ -228,8 +245,7 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let block = BasicBlock(block.llbb);
// Unreachable block
if !visited.contains(bb.index()) {
block.delete();
} else if block.pred_iter().count() == 0 {
debug!("trans_mir: block {:?} was not visited", bb);
block.delete();
}
}
......@@ -431,7 +447,6 @@ fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mod analyze;
mod block;
mod constant;
mod drop;
mod lvalue;
mod operand;
mod rvalue;
......
......@@ -15,12 +15,11 @@
use common::{self, Block, BlockAndBuilder};
use datum;
use value::Value;
use glue;
use std::fmt;
use super::lvalue::load_fat_ptr;
use super::{MirContext, TempRef, drop};
use super::{MirContext, TempRef};
/// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a
......@@ -179,29 +178,4 @@ pub fn store_operand_direct(&mut self,
}
}
}
pub fn set_operand_dropped(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>) {
match *operand {
mir::Operand::Constant(_) => return,
mir::Operand::Consume(ref lvalue) => {
if let mir::Lvalue::Temp(idx) = *lvalue {
if let TempRef::Operand(..) = self.temps[idx as usize] {
// All lvalues which have an associated drop are promoted to an alloca
// beforehand. If this is an operand, it is safe to say this is never
// dropped and there’s no reason for us to zero this out at all.
return
}
}
let lvalue = self.trans_lvalue(bcx, lvalue);
let ty = lvalue.ty.to_ty(bcx.tcx());
if !glue::type_needs_drop(bcx.tcx(), ty) {
return
} else {
drop::drop_fill(bcx, lvalue.llval, ty);
}
}
}
}
}
......@@ -25,7 +25,6 @@
use tvec;
use value::Value;
use Disr;
use glue;
use super::MirContext;
use super::operand::{OperandRef, OperandValue};
......@@ -48,7 +47,6 @@ pub fn trans_rvalue(&mut self,
// FIXME: consider not copying constants through stack. (fixable by translating
// constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
self.store_operand(&bcx, dest.llval, tr_operand);
self.set_operand_dropped(&bcx, operand);
bcx
}
......@@ -92,7 +90,6 @@ pub fn trans_rvalue(&mut self,
}
}
});
self.set_operand_dropped(&bcx, source);
bcx
}
......@@ -107,7 +104,6 @@ pub fn trans_rvalue(&mut self,
block
})
});
self.set_operand_dropped(&bcx, elem);
bcx
}
......@@ -128,7 +124,6 @@ pub fn trans_rvalue(&mut self,
val, disr, i);
self.store_operand(&bcx, lldest_i, op);
}
self.set_operand_dropped(&bcx, operand);
}
},
_ => {
......@@ -167,7 +162,6 @@ pub fn trans_rvalue(&mut self,
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, op);
}
self.set_operand_dropped(&bcx, operand);
}
}
}
......@@ -209,9 +203,6 @@ pub fn trans_rvalue(&mut self,
asm::trans_inline_asm(bcx, asm, outputs, input_vals);
});
for input in inputs {
self.set_operand_dropped(&bcx, input);
}
bcx
}
......@@ -269,7 +260,6 @@ pub fn trans_rvalue_operand(&mut self,
// &'a fmt::Debug+Send => &'a fmt::Debug,
// So we need to pointercast the base to ensure
// the types match up.
self.set_operand_dropped(&bcx, source);
let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), cast_ty);
let lldata = bcx.pointercast(lldata, llcast_ty);
OperandValue::FatPtr(lldata, llextra)
......@@ -280,7 +270,6 @@ pub fn trans_rvalue_operand(&mut self,
base::unsize_thin_ptr(bcx, lldata,
operand.ty, cast_ty)
});
self.set_operand_dropped(&bcx, source);
OperandValue::FatPtr(lldata, llextra)
}
OperandValue::Ref(_) => {
......@@ -569,8 +558,8 @@ pub fn trans_scalar_binop(&mut self,
}
}
pub fn rvalue_creates_operand<'bcx, 'tcx>(mir: &mir::Mir<'tcx>,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
pub fn rvalue_creates_operand<'bcx, 'tcx>(_mir: &mir::Mir<'tcx>,
_bcx: &BlockAndBuilder<'bcx, 'tcx>,
rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Ref(..) |
......@@ -578,21 +567,14 @@ pub fn rvalue_creates_operand<'bcx, 'tcx>(mir: &mir::Mir<'tcx>,
mir::Rvalue::Cast(..) | // (*)
mir::Rvalue::BinaryOp(..) |
mir::Rvalue::UnaryOp(..) |
mir::Rvalue::Box(..) =>
mir::Rvalue::Box(..) |
mir::Rvalue::Use(..) =>
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } |
mir::Rvalue::InlineAsm { .. } =>
false,
mir::Rvalue::Use(ref operand) => {
let ty = mir.operand_ty(bcx.tcx(), operand);
let ty = bcx.monomorphize(&ty);
// Types that don't need dropping can just be an operand,
// this allows temporary lvalues, used as rvalues, to
// avoid a stack slot when it's unnecessary
!glue::type_needs_drop(bcx.tcx(), ty)
}
}
// (*) this is only true if the type is suitable
......
......@@ -173,7 +173,7 @@ pub struct Instance<'tcx> {
impl<'tcx> fmt::Display for Instance<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
ppaux::parameterized(f, &self.substs, self.def, ppaux::Ns::Value, &[],
|tcx| tcx.lookup_item_type(self.def).generics)
|tcx| Some(tcx.lookup_item_type(self.def).generics))
}
}
......
Subproject commit a73c41e7f1c85cd814e9792fc6a6a8f8e31b8dd4
Subproject commit 80ad955b60b3ac02d0462a4a65fcea597d0ebfb1
......@@ -56,5 +56,3 @@ fn main()
//~ TRANS_ITEM fn cgu_export_trait_method::{{impl}}[0]::without_default_impl_generic[0]<bool>
let _: (char, bool) = Trait::without_default_impl_generic(false);
}
//~ TRANS_ITEM drop-glue i8
......@@ -60,5 +60,3 @@ fn main() {
//~ TRANS_ITEM fn generic_functions::foo3[0]<char, (), ()>
let _ = foo3('v', (), ());
}
//~ TRANS_ITEM drop-glue i8
......@@ -56,5 +56,3 @@ fn user() {
let _ = generic("abc");
}
}
//~ TRANS_ITEM drop-glue i8
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
// check that panics in destructors during assignment do not leave
// destroyed values lying around for other destructors to observe.
// error-pattern:panicking destructors ftw!
// Holds a mutable borrow of the "bomb" so its destructor can observe
// the value that is in the slot when it runs.
struct Observer<'a>(&'a mut FilledOnDrop);

// A value whose destructor scribbles over its field and panics when the
// field is 0 (see the `Drop` impl below).
struct FilledOnDrop(u32);
impl Drop for FilledOnDrop {
    fn drop(&mut self) {
        // Only the initial `FilledOnDrop(0)` bomb panics; replacement
        // values constructed with a nonzero field drop silently.
        if self.0 == 0 {
            // this is only set during the destructor - safe
            // code should not be able to observe this.
            self.0 = 0x1c1c1c1c;
            panic!("panicking destructors ftw!");
        }
    }
}
impl<'a> Drop for Observer<'a> {
    fn drop(&mut self) {
        // Runs while unwinding from `FilledOnDrop`'s panic: the borrowed
        // slot must contain the newly assigned value (1), not the
        // destroyed/scribbled old one (see file header comment).
        assert_eq!(self.0 .0, 1);
    }
}
#[rustc_mir]
// The assignment drops the old `FilledOnDrop(0)`, whose destructor
// panics; per the file header, the destroyed value must not remain
// observable afterwards.
fn foo(b: &mut Observer) {
    *b.0 = FilledOnDrop(1);
}
fn main() {
    // `bomb` starts at 0, so dropping it (triggered by the assignment
    // inside `foo`) panics — matching the error-pattern at the top of
    // the file.
    let mut bomb = FilledOnDrop(0);
    let mut observer = Observer(&mut bomb);
    foo(&mut observer);
}
......@@ -18,6 +18,8 @@
// Minimal lang-item definitions — this test apparently builds without
// the standard library's versions (presumably `#![no_core]`; the crate
// attributes are outside this hunk).
#[lang = "sized"]
trait Sized {}

#[lang = "copy"]
trait Copy {}
#[cfg(target_has_atomic = "8")]
pub unsafe fn atomic_u8(x: *mut u8) {
......
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(rustc_attrs)]
use std::cell::{Cell, RefCell};
use std::panic;
use std::usize;
// Panic payload used to distinguish deliberately injected failures
// from genuine test failures.
struct InjectedFailure;

// Tracks "allocations" as booleans (true = live) and injects a panic
// once the operation counter reaches `failing_op` (see `alloc` and
// `Ptr::drop`).
struct Allocator {
    data: RefCell<Vec<bool>>,
    failing_op: usize,
    cur_ops: Cell<usize>,
}
// Each run gets a fresh Allocator (see `run_test`) and all of its state
// is Cell/RefCell bookkeeping, so asserting unwind safety is fine here.
impl panic::UnwindSafe for Allocator {}
impl panic::RefUnwindSafe for Allocator {}
impl Drop for Allocator {
    fn drop(&mut self) {
        let data = self.data.borrow();
        // Any slot still `true` is an allocation that was never freed —
        // i.e. a value the compiler failed to drop.
        if data.iter().any(|d| *d) {
            panic!("missing free: {:?}", data);
        }
    }
}
impl Allocator {
    /// New allocator that injects a panic on operation number
    /// `failing_op`; allocations and frees both count as operations.
    fn new(failing_op: usize) -> Self {
        Allocator {
            failing_op: failing_op,
            cur_ops: Cell::new(0),
            data: RefCell::new(vec![])
        }
    }
    /// Record a new live allocation, panicking with `InjectedFailure`
    /// first if this is the designated failing operation.
    fn alloc(&self) -> Ptr {
        self.cur_ops.set(self.cur_ops.get() + 1);

        if self.cur_ops.get() == self.failing_op {
            panic!(InjectedFailure);
        }

        let mut data = self.data.borrow_mut();
        let addr = data.len();
        data.push(true);
        Ptr(addr, self)
    }
}
// Handle to one allocation: its slot index plus the owning allocator.
struct Ptr<'a>(usize, &'a Allocator);
impl<'a> Drop for Ptr<'a> {
    fn drop(&mut self) {
        match self.1.data.borrow_mut()[self.0] {
            false => {
                // Slot already marked free: this value was dropped twice.
                panic!("double free at index {:?}", self.0)
            }
            ref mut d => *d = false
        }

        // Frees count as operations too and may be the injected failure.
        self.1.cur_ops.set(self.1.cur_ops.get()+1);

        if self.1.cur_ops.get() == self.1.failing_op {
            panic!(InjectedFailure);
        }
    }
}
#[rustc_mir]
// `_x` is initialized on only one path, so whether it needs dropping at
// scope exit must be tracked dynamically.
fn dynamic_init(a: &Allocator, c: bool) {
    let _x;
    if c {
        _x = Some(a.alloc());
    }
}
#[rustc_mir]
// `x` is moved out on only one branch, so whether it still needs
// dropping at scope exit must be tracked dynamically.
fn dynamic_drop(a: &Allocator, c: bool) {
    let x = a.alloc();
    if c {
        Some(x)
    } else {
        None
    };
}
#[rustc_mir]
// At `_v = _w`, `_v` may or may not still hold a value (it was
// conditionally dropped), so dropping the old contents of the
// assignment target must be dynamic; `_w` is then conditionally
// refilled, making its drop at scope exit dynamic as well.
fn assignment2(a: &Allocator, c0: bool, c1: bool) {
    let mut _v = a.alloc();
    let mut _w = a.alloc();
    if c0 {
        drop(_v);
    }
    _v = _w;
    if c1 {
        _w = a.alloc();
    }
}
#[rustc_mir]
// Simpler variant of `assignment2`: only the assignment target `_v`
// has a dynamically-tracked drop state.
fn assignment1(a: &Allocator, c0: bool) {
    let mut _v = a.alloc();
    let mut _w = a.alloc();
    if c0 {
        drop(_v);
    }
    _v = _w;
}
/// Drive `f` once with no injected failure to count how many allocator
/// operations it performs, then re-run it once per operation with a
/// panic injected at exactly that operation.
///
/// Every failing run must unwind with `InjectedFailure`; a run that
/// completes normally (or panics with anything else) is a test bug.
/// Leak detection happens in `Allocator`'s own destructor.
fn run_test<F>(mut f: F)
    where F: FnMut(&Allocator)
{
    // Dry run: `usize::MAX` means "never inject a failure".
    let first_alloc = Allocator::new(usize::MAX);
    f(&first_alloc);

    let op_count = first_alloc.cur_ops.get();
    for failing_op in 1..op_count + 1 {
        let alloc = Allocator::new(failing_op);
        let alloc = &alloc;
        let f = panic::AssertUnwindSafe(&mut f);
        match panic::catch_unwind(move || f.0(alloc)) {
            Err(e) => {
                // Only the injected panic is acceptable; anything else
                // is a genuine failure and must keep unwinding.
                if e.downcast_ref::<InjectedFailure>().is_none() {
                    panic::resume_unwind(e);
                }
            }
            Ok(..) => panic!("test executed {} ops but now {}",
                             first_alloc.cur_ops.get(), alloc.cur_ops.get()),
        }
    }
}
/// Run every scenario: each flag combination of each test function,
/// `false` before `true`, inner flag varying fastest.
fn main() {
    for &c in &[false, true] {
        run_test(|a| dynamic_init(a, c));
    }
    for &c in &[false, true] {
        run_test(|a| dynamic_drop(a, c));
    }
    for &c0 in &[false, true] {
        for &c1 in &[false, true] {
            run_test(|a| assignment2(a, c0, c1));
        }
    }
    for &c in &[false, true] {
        run_test(|a| assignment1(a, c));
    }
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册