Commit 87a8e8e0 authored by bors

Auto merge of #45359 - arielb1:escaping-borrow, r=eddyb

Fix a few bugs in drop generation

This fixes a few bugs in drop generation, one of which causes spurious MIR borrowck errors.

Fixes #44832.

r? @eddyb
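
For context, a minimal standalone sketch of the scope shape whose unwind path this PR fixes, mirroring the `mixed_drop_and_nondrop` test added below (the function and values here are illustrative, not taken from the PR):

```rust
// Deferred initialization interleaves locals that need drop glue
// (`x`, `z`) with one that does not (`y`). Drop generation must emit
// an unwind path that drops `z` then `x` while skipping `y`.
fn mixed(v: Vec<u32>) {
    let (x, y, z);
    x = v;            // needs drop
    y = 5;            // plain integer: no drop glue
    z = vec![1, 2];   // needs drop
    println!("{:?} {} {:?}", x, y, z);
}

fn main() {
    mixed(vec![0]);
}
```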
@@ -131,6 +131,9 @@ pub struct Scope<'tcx> {
     /// The cache for drop chain on "generator drop" exit.
     cached_generator_drop: Option<BasicBlock>,
+
+    /// The cache for drop chain on "unwind" exit.
+    cached_unwind: CachedBlock,
 }

 #[derive(Debug)]
@@ -154,6 +157,11 @@ struct CachedBlock {
     unwind: Option<BasicBlock>,

     /// The cached block for unwinds during cleanups-on-generator-drop path
+    ///
+    /// This is split from the standard unwind path here to prevent drop
+    /// elaboration from creating drop flags that would have to be captured
+    /// by the generator. I'm not sure how important this optimization is,
+    /// but it is here.
     generator_drop: Option<BasicBlock>,
 }
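
Review note: the comment above separates the two diverging exits a generator can take, ordinary unwinding versus being dropped while suspended. A hedged sketch of the second case, written against the same 2017-era nightly API as the `generator` test added below (pre-`Pin` `resume`; today's equivalent is the coroutines API). Names are illustrative:

```rust
#![feature(generators, generator_trait)]

use std::ops::Generator;

fn main() {
    let s = String::from("needs drop across a yield");
    let mut gen = move || {
        let guard = s; // lives across the yield, so the generator owns it
        yield;
        drop(guard);   // never reached if the generator is dropped early
    };
    gen.resume(); // runs up to the first yield
    // `gen` falls out of scope while suspended: the generator-drop path,
    // cached separately from the unwind path, must still drop `guard`.
}
```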
@@ -217,34 +225,29 @@ impl<'tcx> Scope<'tcx> {
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
     ///
-    /// `unwind` controls whether caches for the unwind branch are also invalidated.
-    fn invalidate_cache(&mut self, unwind: bool) {
+    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
+    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
+    /// top-of-scope (as opposed to dependent scopes).
+    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
+        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
+        // with lots of `try!`?
+
+        // cached exits drop storage and refer to the top-of-scope
         self.cached_exits.clear();
-        if !unwind { return; }
-        for dropdata in &mut self.drops {
-            if let DropKind::Value { ref mut cached_block } = dropdata.kind {
-                cached_block.invalidate();
-            }
+
+        if !storage_only {
+            // the current generator drop and unwind ignore
+            // storage but refer to top-of-scope
+            self.cached_generator_drop = None;
+            self.cached_unwind.invalidate();
         }
-    }

-    /// Returns the cached entrypoint for diverging exit from this scope.
-    ///
-    /// Precondition: the caches must be fully filled (i.e. diverge_cleanup is called) in order for
-    /// this method to work correctly.
-    fn cached_block(&self, generator_drop: bool) -> Option<BasicBlock> {
-        let mut drops = self.drops.iter().rev().filter_map(|data| {
-            match data.kind {
-                DropKind::Value { cached_block } => {
-                    Some(cached_block.get(generator_drop))
+        if !storage_only && !this_scope_only {
+            for dropdata in &mut self.drops {
+                if let DropKind::Value { ref mut cached_block } = dropdata.kind {
+                    cached_block.invalidate();
                 }
-                DropKind::Storage => None
             }
-        });
-        if let Some(cached_block) = drops.next() {
-            Some(cached_block.expect("drop cache is not filled"))
-        } else {
-            None
         }
     }
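
Review note: a standalone mini-model of the new invalidation rules, using simplified stand-in types rather than rustc's `Scope`/`CachedBlock`, to show which cache family each flag spares:

```rust
// Simplified stand-ins; this is not rustc's actual Scope.
#[derive(Default)]
struct MiniScope {
    cached_exits: Vec<u32>,             // exit-path blocks
    cached_generator_drop: Option<u32>,
    cached_unwind: Option<u32>,
    drop_chain: Vec<Option<u32>>,       // per-drop cached blocks
}

impl MiniScope {
    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
        // exits drop storage and refer to the top of scope: always cleared
        self.cached_exits.clear();
        if !storage_only {
            // generator-drop/unwind paths ignore storage but refer to top-of-scope
            self.cached_generator_drop = None;
            self.cached_unwind = None;
        }
        if !storage_only && !this_scope_only {
            // only a drop scheduled into an *enclosing* scope forces the
            // per-drop chains of this scope to be rebuilt
            for block in &mut self.drop_chain {
                *block = None;
            }
        }
    }
}

fn main() {
    let mut scope = MiniScope { drop_chain: vec![Some(7)], ..Default::default() };
    scope.invalidate_cache(false, true); // a value drop in *this* scope
    assert!(scope.cached_unwind.is_none());
    assert_eq!(scope.drop_chain, vec![Some(7)]); // per-drop chain survives
}
```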
@@ -356,7 +359,8 @@ pub fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
             needs_cleanup: false,
             drops: vec![],
             cached_generator_drop: None,
-            cached_exits: FxHashMap()
+            cached_exits: FxHashMap(),
+            cached_unwind: CachedBlock::default(),
         });
     }
@@ -482,15 +486,16 @@ pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
                                    TerminatorKind::Goto { target: b });
                 b
             };

-            // End all regions for scopes out of which we are breaking.
-            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);

             unpack!(block = build_scope_drops(&mut self.cfg,
                                               scope,
                                               rest,
                                               block,
                                               self.arg_count,
                                               true));
+
+            // End all regions for scopes out of which we are breaking.
+            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
         }

         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
@@ -672,8 +677,7 @@ pub fn schedule_drop(&mut self,
             // invalidating caches of each scope visited. This way bare minimum of the
             // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
             // cache of outer scope stays intact.
-            let invalidate_unwind = needs_drop && !this_scope;
-            scope.invalidate_cache(invalidate_unwind);
+            scope.invalidate_cache(!needs_drop, this_scope);
             if this_scope {
                 if let DropKind::Value { .. } = drop_kind {
                     scope.needs_cleanup = true;
@@ -819,30 +823,50 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
                            generator_drop: bool)
                            -> BlockAnd<()> {
     debug!("build_scope_drops({:?} -> {:?})", block, scope);
-    let mut iter = scope.drops.iter().rev().peekable();
+    let mut iter = scope.drops.iter().rev();
     while let Some(drop_data) = iter.next() {
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
             DropKind::Value { .. } => {
-                // Try to find the next block with its cached block
-                // for us to diverge into in case the drop panics.
-                let on_diverge = iter.peek().iter().filter_map(|dd| {
+                // Try to find the next block with its cached block for us to
+                // diverge into, either a previous block in this current scope or
+                // the top of the previous scope.
+                //
+                // If it wasn't for EndRegion, we could just chain all the DropData
+                // together and pick the first DropKind::Value. Please do that
+                // when we replace EndRegion with NLL.
+                let on_diverge = iter.clone().filter_map(|dd| {
                     match dd.kind {
-                        DropKind::Value { cached_block } => {
-                            let result = cached_block.get(generator_drop);
-                            if result.is_none() {
-                                span_bug!(drop_data.span, "cached block not present?")
-                            }
-                            result
-                        },
+                        DropKind::Value { cached_block } => Some(cached_block),
                         DropKind::Storage => None
                     }
-                }).next();
-                // If there are no `cached_block`s within the current scope,
-                // we must look for one in the enclosing scope.
-                let on_diverge = on_diverge.or_else(|| {
-                    earlier_scopes.iter().rev().flat_map(|s| s.cached_block(generator_drop)).next()
+                }).next().or_else(|| {
+                    if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
+                        // If *any* scope requires cleanup code to be run,
+                        // we must use the cached unwind from the *topmost*
+                        // scope, to ensure all EndRegions from surrounding
+                        // scopes are executed before the drop code runs.
+                        Some(earlier_scopes.last().unwrap().cached_unwind)
+                    } else {
+                        // We don't need any further cleanup, so return None
+                        // to avoid creating a landing pad. We can skip
+                        // EndRegions because all local regions end anyway
+                        // when the function unwinds.
+                        //
+                        // This is an important optimization because LLVM is
+                        // terrible at optimizing landing pads. FIXME: I think
+                        // it would be cleaner and better to do this optimization
+                        // in SimplifyCfg instead of here.
+                        None
+                    }
                 });
+                let on_diverge = on_diverge.map(|cached_block| {
+                    cached_block.get(generator_drop).unwrap_or_else(|| {
+                        span_bug!(drop_data.span, "cached block not present?")
+                    })
+                });
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
                     location: drop_data.location.clone(),
@@ -933,14 +957,23 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
         };
     }

-    // Finally, push the EndRegion block, used by mir-borrowck. (Block
-    // becomes trivial goto after pass that removes all EndRegions.)
-    {
-        let block = cfg.start_new_cleanup_block();
-        cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
-        cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
-        target = block
-    }
+    // Finally, push the EndRegion block, used by mir-borrowck, and set
+    // `cached_unwind` to point to it (Block becomes trivial goto after
+    // pass that removes all EndRegions).
+    target = {
+        let cached_block = scope.cached_unwind.ref_mut(generator_drop);
+        if let Some(cached_block) = *cached_block {
+            cached_block
+        } else {
+            let block = cfg.start_new_cleanup_block();
+            cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
+            cfg.terminate(block, source_info(span), TerminatorKind::Goto { target: target });
+            *cached_block = Some(block);
+            block
+        }
+    };

     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);

     target
 }
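
Review note: the rewritten block memoizes the per-scope `EndRegion` cleanup block in `cached_unwind`. A standalone sketch of that get-or-create shape, with simplified stand-in types rather than rustc's `CFG`:

```rust
// Simplified stand-ins for rustc's CFG/BasicBlock; illustrative only.
#[derive(Default)]
struct Cfg {
    blocks: u32,
}

impl Cfg {
    fn start_new_cleanup_block(&mut self) -> u32 {
        self.blocks += 1;
        self.blocks
    }
}

// Get-or-create: reuse the cached cleanup block if one exists,
// otherwise build it once and record it in the cache.
fn diverge_target(cfg: &mut Cfg, cached: &mut Option<u32>) -> u32 {
    if let Some(block) = *cached {
        block
    } else {
        let block = cfg.start_new_cleanup_block();
        *cached = Some(block);
        block
    }
}

fn main() {
    let mut cfg = Cfg::default();
    let mut cache = None;
    let a = diverge_target(&mut cfg, &mut cache);
    let b = diverge_target(&mut cfg, &mut cache);
    assert_eq!(a, b); // the second call reuses the cached block
}
```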
@@ -8,10 +8,15 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

+// ignore-tidy-linelength
+// revisions: ast mir
+//[mir]compile-flags: -Z emit-end-regions -Z borrowck-mir
+
 fn foo(x: Box<isize>) -> isize {
     let y = &*x;
-    free(x); //~ ERROR cannot move out of `x` because it is borrowed
+    free(x); //[ast]~ ERROR cannot move out of `x` because it is borrowed
+    //[mir]~^ ERROR cannot move out of `x` because it is borrowed (Ast)
+    //[mir]~| ERROR cannot move out of `x` because it is borrowed (Mir)
     *y
 }
@@ -8,9 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#![feature(untagged_unions)]
+#![feature(generators, generator_trait, untagged_unions)]

 use std::cell::{Cell, RefCell};
+use std::ops::Generator;
 use std::panic;
 use std::usize;
@@ -161,6 +162,32 @@ fn vec_simple(a: &Allocator) {
     let _x = vec![a.alloc(), a.alloc(), a.alloc(), a.alloc()];
 }

+fn generator(a: &Allocator, run_count: usize) {
+    assert!(run_count < 4);
+
+    let mut gen = || {
+        (a.alloc(),
+         yield a.alloc(),
+         a.alloc(),
+         yield a.alloc()
+        );
+    };
+    for _ in 0..run_count {
+        gen.resume();
+    }
+}
+
+fn mixed_drop_and_nondrop(a: &Allocator) {
+    // check that destructor panics handle drop
+    // and non-drop blocks in the same scope correctly.
+    //
+    // Surprisingly enough, this used to not work.
+    let (x, y, z);
+    x = a.alloc();
+    y = 5;
+    z = a.alloc();
+}
+
 #[allow(unreachable_code)]
 fn vec_unreachable(a: &Allocator) {
     let _x = vec![a.alloc(), a.alloc(), a.alloc(), return];
@@ -228,5 +255,12 @@ fn main() {
     run_test(|a| field_assignment(a, false));
     run_test(|a| field_assignment(a, true));

+    run_test(|a| generator(a, 0));
+    run_test(|a| generator(a, 1));
+    run_test(|a| generator(a, 2));
+    run_test(|a| generator(a, 3));
+
+    run_test(|a| mixed_drop_and_nondrop(a));
+
     run_test_nopanic(|a| union1(a));
 }