Commit b8266a90 authored by bors

Auto merge of #43772 - arielb1:nonfree-block, r=nagisa

For box expressions, use NZ drop instead of a free block

This falls naturally out of making drop elaboration work with `box`
expressions, which is probably required for sane MIR borrow-checking.
This is a pure refactoring with no intentional functional effects.

r? @nagisa
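For orientation (not part of the patch): "NZ drop" here means an ordinary, non-zeroing MIR drop of the box temporary, as opposed to the dedicated cleanup block that called the `box_free` lang item. Below is a minimal, self-contained sketch of the invariant both lowerings must preserve; it is my own example, using the then-nightly `box_syntax` feature:

```rust
#![feature(box_syntax)] // nightly-only at the time of this commit

// If `EXPR` in `box EXPR` unwinds, the heap allocation must be freed,
// but `Drop` must NOT run for the contents: they were never initialized.
struct Noisy;

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping Noisy"); // must never be printed by this program
    }
}

fn make() -> Noisy {
    // the box is already allocated when this panic starts unwinding
    panic!("initializer unwinds before the box is filled in")
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _b: Box<Noisy> = box make();
    });
    assert!(result.is_err());
    // Before this PR, the unwind path ran a dedicated "free block" calling
    // the `box_free` lang item; after it, an ordinary shallow drop of the
    // box temporary is scheduled and drop elaboration emits the same call.
}
```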
@@ -135,6 +135,11 @@ pub fn ty<'a, 'gcx, D>(&self, local_decls: &D, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>
     }
 }
 
+pub enum RvalueInitializationState {
+    Shallow,
+    Deep
+}
+
 impl<'tcx> Rvalue<'tcx> {
     pub fn ty<'a, 'gcx, D>(&self, local_decls: &D, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>
         where D: HasLocalDecls<'tcx>
@@ -206,6 +211,16 @@ pub fn ty<'a, 'gcx, D>(&self, local_decls: &D, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx>
             }
         }
     }
+
+    #[inline]
+    /// Returns whether this rvalue is deeply initialized (most rvalues) or
+    /// whether it is only shallowly initialized (`Rvalue::Box`).
+    pub fn initialization_state(&self) -> RvalueInitializationState {
+        match *self {
+            Rvalue::NullaryOp(NullOp::Box, _) => RvalueInitializationState::Shallow,
+            _ => RvalueInitializationState::Deep
+        }
+    }
 }
 
 impl<'tcx> Operand<'tcx> {
......
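A self-contained analogue of the new query added above (my own stub; the real `Rvalue`, `NullOp`, and surrounding compiler types live in `rustc::mir` and are elided here), showing how callers dispatch on it:

```rust
/// Mirrors the enum added in the hunk above.
#[derive(Debug, PartialEq)]
pub enum RvalueInitializationState {
    Shallow,
    Deep,
}

/// Stand-in for `rustc::mir::Rvalue`, keeping only the case that matters.
enum Rvalue {
    /// models `Rvalue::NullaryOp(NullOp::Box, _)`
    NullaryOpBox,
    /// any other rvalue
    Other,
}

impl Rvalue {
    /// A fresh box only initializes the pointer itself (`Shallow`);
    /// every other rvalue fully initializes its destination (`Deep`).
    fn initialization_state(&self) -> RvalueInitializationState {
        match *self {
            Rvalue::NullaryOpBox => RvalueInitializationState::Shallow,
            Rvalue::Other => RvalueInitializationState::Deep,
        }
    }
}

fn main() {
    assert_eq!(Rvalue::NullaryOpBox.initialization_state(),
               RvalueInitializationState::Shallow);
    assert_eq!(Rvalue::Other.initialization_state(),
               RvalueInitializationState::Deep);
}
```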
@@ -94,19 +94,19 @@ fn expr_as_rvalue(&mut self,
             }
             block.and(Rvalue::UnaryOp(op, arg))
         }
-        ExprKind::Box { value, value_extents } => {
+        ExprKind::Box { value } => {
             let value = this.hir.mirror(value);
             let result = this.temp(expr.ty, expr_span);
             // to start, malloc some memory of suitable type (thus far, uninitialized):
             let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty);
             this.cfg.push_assign(block, source_info, &result, box_);
-            this.in_scope((value_extents, source_info), block, |this| {
+            if let Some(scope) = scope {
                 // schedule a shallow free of that memory, lest we unwind:
-                this.schedule_box_free(expr_span, value_extents, &result, value.ty);
-                // initialize the box contents:
-                unpack!(block = this.into(&result.clone().deref(), block, value));
-                block.and(Rvalue::Use(Operand::Consume(result)))
-            })
+                this.schedule_drop(expr_span, scope, &result, value.ty);
+            }
+
+            // initialize the box contents:
+            unpack!(block = this.into(&result.clone().deref(), block, value));
+            block.and(Rvalue::Use(Operand::Consume(result)))
         }
         ExprKind::Cast { source } => {
             let source = this.hir.mirror(source);
......
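Conceptually, the lowering of `let b = box f();` changes as sketched below. This is my own reconstruction, written in the same comment style as the MIR dumps in the test expectations further down; block and local numbers are illustrative, not from a real dump:

```rust
// Before: unwinding out of the initializer jumped to a dedicated
// cleanup block whose terminator called the `box_free` lang item:
//
// bb0: {
//     _1 = Box(T);                                // shallow allocation
//     (*_1) = f() -> [return: bb1, unwind: bb2];  // may unwind
// }
// bb2 (cleanup): {
//     _2 = box_free(_1) -> bb3;                   // the "free block"
// }
//
// After: the unwind path is an ordinary drop of the box temporary.
// Because `_1 = Box(T)` only shallowly initializes `_1`, drop
// elaboration reduces that drop to the same `box_free` call whenever
// `(*_1)` is not yet initialized:
//
// bb2 (cleanup): {
//     drop(_1) -> bb3;
// }
```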
@@ -89,9 +89,6 @@
 use build::{BlockAnd, BlockAndExtension, Builder, CFG};
 use rustc::middle::region::CodeExtent;
-use rustc::middle::lang_items;
-use rustc::middle::const_val::ConstVal;
-use rustc::ty::subst::{Kind, Subst};
 use rustc::ty::{Ty, TyCtxt};
 use rustc::mir::*;
 use rustc::mir::transform::MirSource;
@@ -127,21 +124,6 @@ pub struct Scope<'tcx> {
     /// end of the vector (top of the stack) first.
     drops: Vec<DropData<'tcx>>,
 
-    /// A scope may only have one associated free, because:
-    ///
-    /// 1. We require a `free` to only be scheduled in the scope of
-    ///    `EXPR` in `box EXPR`;
-    /// 2. It only makes sense to have it translated into the diverge-path.
-    ///
-    /// This kind of drop will be run *after* all the regular drops
-    /// scheduled onto this scope, because drops may have dependencies
-    /// on the allocated memory.
-    ///
-    /// This is expected to go away once `box EXPR` becomes a sugar
-    /// for placement protocol and gets desugared in some earlier
-    /// stage.
-    free: Option<FreeData<'tcx>>,
-
     /// The cache for drop chain on “normal” exit into a particular BasicBlock.
     cached_exits: FxHashMap<(BasicBlock, CodeExtent), BasicBlock>,
 }
@@ -170,22 +152,6 @@ enum DropKind {
     Storage
 }
 
-#[derive(Debug)]
-struct FreeData<'tcx> {
-    /// span where free obligation was incurred
-    span: Span,
-
-    /// Lvalue containing the allocated box.
-    value: Lvalue<'tcx>,
-
-    /// type of item for which the box was allocated for (i.e. the T in Box<T>).
-    item_ty: Ty<'tcx>,
-
-    /// The cached block containing code to run the free. The block will also execute all the drops
-    /// in the scope.
-    cached_block: Option<BasicBlock>
-}
-
 #[derive(Clone, Debug)]
 pub struct BreakableScope<'tcx> {
     /// Extent of the loop
@@ -224,9 +190,6 @@ fn invalidate_cache(&mut self, unwind: bool) {
                 *cached_block = None;
             }
         }
-        if let Some(ref mut freedata) = self.free {
-            freedata.cached_block = None;
-        }
     }
 
     /// Returns the cached entrypoint for diverging exit from this scope.
@@ -242,8 +205,6 @@ fn cached_block(&self) -> Option<BasicBlock> {
         });
         if let Some(cached_block) = drops.next() {
             Some(cached_block.expect("drop cache is not filled"))
-        } else if let Some(ref data) = self.free {
-            Some(data.cached_block.expect("free cache is not filled"))
         } else {
             None
         }
@@ -333,7 +294,6 @@ pub fn push_scope(&mut self, extent: (CodeExtent, SourceInfo)) {
             extent_span: extent.1.span,
             needs_cleanup: false,
             drops: vec![],
-            free: None,
             cached_exits: FxHashMap()
         });
     }
@@ -382,7 +342,6 @@ pub fn exit_scope(&mut self,
         });
         let len = self.scopes.len();
         assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
-        let tmp = self.get_unit_temp();
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
@@ -415,13 +374,6 @@ pub fn exit_scope(&mut self,
             // End all regions for scopes out of which we are breaking.
             self.cfg.push_end_region(block, extent.1, scope.extent);
-
-            if let Some(ref free_data) = scope.free {
-                let next = self.cfg.start_new_block();
-                let free = build_free(self.hir.tcx(), &tmp, free_data, next);
-                self.cfg.terminate(block, scope.source_info(span), free);
-                block = next;
-            }
         }
     }
     let scope = &self.scopes[len - scope_count];
@@ -607,36 +559,6 @@ pub fn schedule_drop(&mut self,
         span_bug!(span, "extent {:?} not in scope to drop {:?}", extent, lvalue);
     }
 
-    /// Schedule dropping of a not-yet-fully-initialised box.
-    ///
-    /// This cleanup will only be translated into unwind branch.
-    /// The extent should be for the `EXPR` inside `box EXPR`.
-    /// There may only be one “free” scheduled in any given scope.
-    pub fn schedule_box_free(&mut self,
-                             span: Span,
-                             extent: CodeExtent,
-                             value: &Lvalue<'tcx>,
-                             item_ty: Ty<'tcx>) {
-        for scope in self.scopes.iter_mut().rev() {
-            // See the comment in schedule_drop above. The primary difference is that we invalidate
-            // the unwind blocks unconditionally. That’s because the box free may be considered
-            // outer-most cleanup within the scope.
-            scope.invalidate_cache(true);
-            if scope.extent == extent {
-                assert!(scope.free.is_none(), "scope already has a scheduled free!");
-                scope.needs_cleanup = true;
-                scope.free = Some(FreeData {
-                    span: span,
-                    value: value.clone(),
-                    item_ty: item_ty,
-                    cached_block: None
-                });
-                return;
-            }
-        }
-        span_bug!(span, "extent {:?} not in scope to free {:?}", extent, value);
-    }
-
     // Other
     // =====
     /// Creates a path that performs all required cleanup for unwinding.
@@ -650,7 +572,6 @@ pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
         }
         assert!(!self.scopes.is_empty()); // or `any` above would be false
 
-        let unit_temp = self.get_unit_temp();
         let Builder { ref mut hir, ref mut cfg, ref mut scopes,
                       ref mut cached_resume_block, .. } = *self;
@@ -679,7 +600,7 @@ pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
         for scope in scopes.iter_mut() {
             target = build_diverge_scope(
-                hir.tcx(), cfg, &unit_temp, scope.extent_span, scope, target);
+                hir.tcx(), cfg, scope.extent_span, scope, target);
         }
         Some(target)
     }
@@ -805,9 +726,8 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
     block.unit()
 }
 
-fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
+fn build_diverge_scope<'a, 'gcx, 'tcx>(_tcx: TyCtxt<'a, 'gcx, 'tcx>,
                                        cfg: &mut CFG<'tcx>,
-                                       unit_temp: &Lvalue<'tcx>,
                                        span: Span,
                                        scope: &mut Scope<'tcx>,
                                        mut target: BasicBlock)
@@ -832,19 +752,6 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
         scope: visibility_scope
     };
 
-    // Next, build up any free.
-    if let Some(ref mut free_data) = scope.free {
-        target = if let Some(cached_block) = free_data.cached_block {
-            cached_block
-        } else {
-            let into = cfg.start_new_cleanup_block();
-            cfg.terminate(into, source_info(free_data.span),
-                          build_free(tcx, unit_temp, free_data, target));
-            free_data.cached_block = Some(into);
-            into
-        };
-    }
-
     // Next, build up the drops. Here we iterate the vector in
     // *forward* order, so that we generate drops[0] first (right to
     // left in diagram above).
@@ -888,24 +795,3 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
     target
 }
-
-fn build_free<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
-                              unit_temp: &Lvalue<'tcx>,
-                              data: &FreeData<'tcx>,
-                              target: BasicBlock)
-                              -> TerminatorKind<'tcx> {
-    let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem);
-    let substs = tcx.intern_substs(&[Kind::from(data.item_ty)]);
-    TerminatorKind::Call {
-        func: Operand::Constant(box Constant {
-            span: data.span,
-            ty: tcx.type_of(free_func).subst(tcx, substs),
-            literal: Literal::Value {
-                value: ConstVal::Function(free_func, substs),
-            }
-        }),
-        args: vec![Operand::Consume(data.value.clone())],
-        destination: Some((unit_temp.clone(), target)),
-        cleanup: None
-    }
-}
@@ -240,7 +240,7 @@ pub(crate) fn drop_flag_effects_for_function_entry<'a, 'tcx, F>(
         let lookup_result = move_data.rev_lookup.find(&lvalue);
         on_lookup_result_bits(tcx, mir, move_data,
                               lookup_result,
-                              |moi| callback(moi, DropFlagState::Present));
+                              |mpi| callback(mpi, DropFlagState::Present));
     }
 }
@@ -270,7 +270,7 @@ pub(crate) fn drop_flag_effects_for_location<'a, 'tcx, F>(
         on_all_children_bits(tcx, mir, move_data,
                              path,
-                             |moi| callback(moi, DropFlagState::Absent))
+                             |mpi| callback(mpi, DropFlagState::Absent))
     }
 
     let block = &mir[loc.block];
@@ -279,11 +279,21 @@ pub(crate) fn drop_flag_effects_for_location<'a, 'tcx, F>(
             mir::StatementKind::SetDiscriminant{ .. } => {
                 span_bug!(stmt.source_info.span, "SetDiscrimant should not exist during borrowck");
             }
-            mir::StatementKind::Assign(ref lvalue, _) => {
-                debug!("drop_flag_effects: assignment {:?}", stmt);
-                on_lookup_result_bits(tcx, mir, move_data,
-                                      move_data.rev_lookup.find(lvalue),
-                                      |moi| callback(moi, DropFlagState::Present))
+            mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
+                match rvalue.initialization_state() {
+                    mir::tcx::RvalueInitializationState::Shallow => {
+                        debug!("drop_flag_effects: box assignment {:?}", stmt);
+                        if let LookupResult::Exact(mpi) = move_data.rev_lookup.find(lvalue) {
+                            callback(mpi, DropFlagState::Present);
+                        }
+                    }
+                    mir::tcx::RvalueInitializationState::Deep => {
+                        debug!("drop_flag_effects: assignment {:?}", stmt);
+                        on_lookup_result_bits(tcx, mir, move_data,
+                                              move_data.rev_lookup.find(lvalue),
+                                              |mpi| callback(mpi, DropFlagState::Present))
+                    }
+                }
             }
             mir::StatementKind::StorageLive(_) |
             mir::StatementKind::StorageDead(_) |
@@ -298,7 +308,7 @@ pub(crate) fn drop_flag_effects_for_location<'a, 'tcx, F>(
         mir::TerminatorKind::DropAndReplace { ref location, .. } => {
             on_lookup_result_bits(tcx, mir, move_data,
                                   move_data.rev_lookup.find(location),
-                                  |moi| callback(moi, DropFlagState::Present))
+                                  |mpi| callback(mpi, DropFlagState::Present))
         }
         _ => {
             // other terminators do not contain move-ins
......
@@ -23,6 +23,7 @@
 use util::elaborate_drops::DropFlagState;
 
 use super::move_paths::{HasMoveData, MoveData, MoveOutIndex, MovePathIndex};
+use super::move_paths::LookupResult;
 use super::{BitDenotation, BlockSets, DataflowOperator};
 use super::drop_flag_effects_for_function_entry;
@@ -469,18 +470,30 @@ fn statement_effect(&self,
             mir::StatementKind::SetDiscriminant { .. } => {
                 span_bug!(stmt.source_info.span, "SetDiscriminant should not exist in borrowck");
             }
-            mir::StatementKind::Assign(ref lvalue, _) => {
+            mir::StatementKind::Assign(ref lvalue, ref rvalue) => {
                 // assigning into this `lvalue` kills all
                 // MoveOuts from it, and *also* all MoveOuts
                 // for children and associated fragment sets.
-                on_lookup_result_bits(tcx,
-                                      mir,
-                                      move_data,
-                                      rev_lookup.find(lvalue),
-                                      |mpi| for moi in &path_map[mpi] {
-                                          assert!(moi.index() < bits_per_block);
-                                          sets.kill_set.add(&moi);
-                                      });
+                match rvalue.initialization_state() {
+                    mir::tcx::RvalueInitializationState::Shallow => {
+                        if let LookupResult::Exact(mpi) = rev_lookup.find(lvalue) {
+                            for moi in &path_map[mpi] {
+                                assert!(moi.index() < bits_per_block);
+                                sets.kill_set.add(&moi);
+                            }
+                        }
+                    }
+                    mir::tcx::RvalueInitializationState::Deep => {
+                        on_lookup_result_bits(tcx,
+                                              mir,
+                                              move_data,
+                                              rev_lookup.find(lvalue),
+                                              |mpi| for moi in &path_map[mpi] {
+                                                  assert!(moi.index() < bits_per_block);
+                                                  sets.kill_set.add(&moi);
+                                              });
+                    }
+                }
             }
             mir::StatementKind::StorageLive(_) |
             mir::StatementKind::StorageDead(_) |
......
@@ -11,6 +11,7 @@
 use rustc::ty::{self, TyCtxt};
 use rustc::mir::*;
+use rustc::mir::tcx::RvalueInitializationState;
 use rustc::util::nodemap::FxHashMap;
 use rustc_data_structures::indexed_vec::{IndexVec};
@@ -406,6 +407,12 @@ fn gather_statement(&mut self, loc: Location, stmt: &Statement<'tcx>) {
         match stmt.kind {
             StatementKind::Assign(ref lval, ref rval) => {
                 self.create_move_path(lval);
+                if let RvalueInitializationState::Shallow = rval.initialization_state() {
+                    // Box starts out uninitialized - need to create a separate
+                    // move-path for the interior so it will be separate from
+                    // the exterior.
+                    self.create_move_path(&lval.clone().deref());
+                }
                 self.gather_rvalue(loc, rval);
             }
             StatementKind::StorageLive(_) |
......
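The two dataflow hunks and the gatherer change above fit together: a box assignment now initializes only the exact move path of the box itself, while the freshly created interior path `(*_x)` stays uninitialized until it is assigned. A runnable toy model of that distinction (mine, not compiler code; `mark_deep` is a hypothetical stand-in for `on_lookup_result_bits`):

```rust
// Toy drop-flag model: path 0 is the box `_x`, path 1 is the interior
// `(*_x)` that the move-path gatherer now creates up front.
fn mark_deep(init: &mut [bool], children: &[Vec<usize>], path: usize) {
    // a Deep assignment initializes the path and all of its children
    init[path] = true;
    for &child in &children[path] {
        mark_deep(init, children, child);
    }
}

fn main() {
    let children = vec![vec![1], vec![]]; // `_x` has one child: `(*_x)`
    let mut init = vec![false, false];

    // Shallow (`_x = Box(T)`): set only the exact path's flag,
    // like the `LookupResult::Exact(mpi)` arm above.
    init[0] = true;
    assert_eq!(init, [true, false]); // `(*_x)` is still uninitialized

    // Deep (any other assignment): the whole subtree becomes initialized,
    // like the `on_lookup_result_bits` arm above.
    mark_deep(&mut init, &children, 0);
    assert_eq!(init, [true, true]);
}
```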
@@ -559,7 +559,6 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>,
         hir::ExprBox(ref value) => {
             ExprKind::Box {
                 value: value.to_ref(),
-                value_extents: CodeExtent::Misc(value.id),
             }
         }
         hir::ExprArray(ref fields) => ExprKind::Array { fields: fields.to_ref() },
......
@@ -116,7 +116,6 @@ pub enum ExprKind<'tcx> {
     },
     Box {
         value: ExprRef<'tcx>,
-        value_extents: CodeExtent,
     },
     Call {
         ty: ty::Ty<'tcx>,
......
@@ -34,10 +34,9 @@ fn foo(i: i32) {
 // let _1: D;
 // let _2: i32;
 // let _3: &'6_2rce i32;
-// let _7: &'6_4rce i32;
+// let _6: &'6_4rce i32;
 // let mut _4: ();
 // let mut _5: i32;
-// let mut _6: ();
 // bb0: {
 //     StorageLive(_1);
 //     _1 = D::{{constructor}}(const 0i32,);
@@ -51,10 +50,10 @@ fn foo(i: i32) {
 // }
 // bb1: {
 //     StorageDead(_5);
-//     StorageLive(_7);
-//     _7 = &'6_4rce _2;
+//     StorageLive(_6);
+//     _6 = &'6_4rce _2;
 //     _0 = ();
-//     StorageDead(_7);
+//     StorageDead(_6);
 //     EndRegion('6_4rce);
 //     StorageDead(_3);
 //     EndRegion('6_2rce);
......
@@ -33,7 +33,6 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
 // let mut _2: ();
 // let mut _3: [closure@NodeId(18) d:&'19mce D];
 // let mut _4: &'19mce D;
-// let mut _5: ();
 // bb0: {
 //     StorageLive(_1);
 //     _1 = D::{{constructor}}(const 0i32,);
......
@@ -33,7 +33,6 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
 // let mut _2: ();
 // let mut _3: [closure@NodeId(22) d:&'23mce D];
 // let mut _4: &'23mce D;
-// let mut _5: ();
 // bb0: {
 //     StorageLive(_1);
 //     _1 = D::{{constructor}}(const 0i32,);
......
@@ -33,7 +33,6 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
 // let mut _2: ();
 // let mut _3: [closure@NodeId(22) d:D];
 // let mut _4: D;
-// let mut _5: ();
 //
 // bb0: {
 //     StorageLive(_1);
@@ -77,7 +76,6 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
 // let mut _0: i32;
 // let _2: &'14_0rce D;
 // let mut _3: i32;
-// let mut _4: ();
 //
 // bb0: {
 //     StorageLive(_2);
......
@@ -35,7 +35,6 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
 // let mut _3: ();
 // let mut _4: [closure@NodeId(22) r:&'6_1rce D];
 // let mut _5: &'6_1rce D;
-// let mut _6: ();
 // bb0: {
 //     StorageLive(_1);
 //     _1 = D::{{constructor}}(const 0i32,);
......
@@ -39,8 +39,7 @@ fn other(self, s: Self) {}
 // let mut _2: S;
 // let mut _3: S;
 // let mut _4: S;
-// let mut _5: ();
-// let mut _6: bool;
+// let mut _5: bool;
 //
 // bb0: {
 // END rustc.node4.ElaborateDrops.after.mir
@@ -50,9 +49,8 @@ fn other(self, s: Self) {}
 // let mut _2: S;
 // let mut _3: ();
 // let mut _4: S;
-// let mut _5: ();
-// let mut _6: S;
-// let mut _7: bool;
+// let mut _5: S;
+// let mut _6: bool;
 //
 // bb0: {
 // END rustc.node13.ElaborateDrops.after.mir
@@ -161,6 +161,11 @@ fn vec_simple(a: &Allocator) {
     let _x = vec![a.alloc(), a.alloc(), a.alloc(), a.alloc()];
 }
 
+#[allow(unreachable_code)]
+fn vec_unreachable(a: &Allocator) {
+    let _x = vec![a.alloc(), a.alloc(), a.alloc(), return];
+}
+
 fn run_test<F>(mut f: F)
     where F: FnMut(&Allocator)
 {
@@ -209,6 +214,7 @@ fn main() {
     run_test(|a| array_simple(a));
     run_test(|a| vec_simple(a));
+    run_test(|a| vec_unreachable(a));
 
     run_test(|a| struct_dynamic_drop(a, false, false, false));
     run_test(|a| struct_dynamic_drop(a, false, false, true));
......