Commit 9da21122 authored by bors

Auto merge of #46320 - arielb1:always-resume, r=nikomatsakis

Always unwind through a Resume and other fixes

Should fix most of the small MIR borrowck issues.

r? @nikomatsakis
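
As a rough sketch of what "always unwind through a Resume" means for the generated MIR (a hypothetical function, not taken from this PR): drop and call terminators built by the scope code now carry an explicit unwind edge to a single shared resume block instead of `unwind: None`, along the lines of:

// bb0: {
//     drop(_1) -> [return: bb2, unwind: bb1];
// }
// bb1: {
//     resume;
// }
// bb2: {
//     return;
// }

Later passes (`NoLandingPads`, and the `RemoveNoopLandingPads` pass added here) are then responsible for turning the unwind edge back into `None` when landing pads are disabled or the landing pad is a no-op; the expected-output changes in the tests below follow this pattern.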
......@@ -733,6 +733,10 @@ pub fn successors(&self) -> Cow<[BasicBlock]> {
pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> {
self.kind.successors_mut()
}
pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
self.kind.unwind_mut()
}
}
impl<'tcx> TerminatorKind<'tcx> {
......@@ -811,6 +815,27 @@ pub fn successors_mut(&mut self) -> Vec<&mut BasicBlock> {
}
}
}
pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
match *self {
TerminatorKind::Goto { .. } |
TerminatorKind::Resume |
TerminatorKind::Return |
TerminatorKind::Unreachable |
TerminatorKind::GeneratorDrop |
TerminatorKind::Yield { .. } |
TerminatorKind::SwitchInt { .. } |
TerminatorKind::FalseEdges { .. } => {
None
},
TerminatorKind::Call { cleanup: ref mut unwind, .. } |
TerminatorKind::Assert { cleanup: ref mut unwind, .. } |
TerminatorKind::DropAndReplace { ref mut unwind, .. } |
TerminatorKind::Drop { ref mut unwind, .. } => {
Some(unwind)
}
}
}
}
impl<'tcx> BasicBlockData<'tcx> {
......
......@@ -384,33 +384,23 @@ fn visit_terminator_entry(&mut self,
// StorageDead, but we don't always emit those (notably on unwind paths),
// so this "extra check" serves as a kind of backup.
let domain = flow_state.borrows.base_results.operator();
for borrow in domain.borrows() {
let root_place = self.prefixes(
&borrow.place,
PrefixSet::All
).last().unwrap();
match root_place {
Place::Static(_) => {
self.access_place(
ContextKind::StorageDead.new(loc),
(&root_place, self.mir.source_info(borrow.location).span),
(Deep, Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state
);
}
Place::Local(_) => {
self.access_place(
let data = domain.borrows();
flow_state.borrows.with_elems_outgoing(|borrows| for i in borrows {
let borrow = &data[i];
if self.place_is_invalidated_at_exit(&borrow.place) {
debug!("borrow conflicts at exit {:?}", borrow);
let borrow_span = self.mir.source_info(borrow.location).span;
// FIXME: should be talking about the region lifetime instead
// of just a span here.
let end_span = domain.opt_region_end_span(&borrow.region);
self.report_borrowed_value_does_not_live_long_enough(
ContextKind::StorageDead.new(loc),
(&root_place, self.mir.source_info(borrow.location).span),
(Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
LocalMutationIsAllowed::Yes,
flow_state
);
}
Place::Projection(_) => ()
}
(&borrow.place, borrow_span),
end_span)
}
});
}
TerminatorKind::Goto { target: _ } |
TerminatorKind::Unreachable |
......@@ -751,6 +741,50 @@ fn consume_operand(&mut self,
Operand::Constant(_) => {}
}
}
/// Returns whether a borrow of this place is invalidated when the function
/// exits
fn place_is_invalidated_at_exit(&self, place: &Place<'tcx>) -> bool {
debug!("place_is_invalidated_at_exit({:?})", place);
let root_place = self.prefixes(place, PrefixSet::All).last().unwrap();
// FIXME(nll-rfc#40): do more precise destructor tracking here. For now
// we just know that all locals are dropped at function exit (otherwise
// we'll have a memory leak) and assume that all statics have a destructor.
let (might_be_alive, will_be_dropped) = match root_place {
Place::Static(statik) => {
// Thread-locals might be dropped after the function exits, but
// "true" statics will never be.
let is_thread_local = self.tcx.get_attrs(statik.def_id).iter().any(|attr| {
attr.check_name("thread_local")
});
(true, is_thread_local)
}
Place::Local(_) => {
// Locals are always dropped at function exit, and if they
// have a destructor it would've been called already.
(false, true)
}
Place::Projection(..) => bug!("root of {:?} is a projection ({:?})?",
place, root_place)
};
if !will_be_dropped {
debug!("place_is_invalidated_at_exit({:?}) - won't be dropped", place);
return false;
}
// FIXME: replace this with a proper borrow_conflicts_with_place when
// that is merged.
let prefix_set = if might_be_alive {
PrefixSet::Supporting
} else {
PrefixSet::Shallow
};
self.prefixes(place, prefix_set).any(|prefix| prefix == root_place)
}
}
impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> {
......@@ -1667,13 +1701,13 @@ fn report_conflicting_borrow(&mut self,
fn report_borrowed_value_does_not_live_long_enough(&mut self,
_: Context,
(place, span): (&Place, Span),
(place, span): (&Place<'tcx>, Span),
end_span: Option<Span>) {
let proper_span = match *place {
let root_place = self.prefixes(place, PrefixSet::All).last().unwrap();
let proper_span = match *root_place {
Place::Local(local) => self.mir.local_decls[local].source_info.span,
_ => span
};
let mut err = self.tcx.path_does_not_live_long_enough(span, "borrowed value", Origin::Mir);
err.span_label(proper_span, "temporary value created here");
err.span_label(span, "temporary value dropped here while still borrowed");
......@@ -2162,4 +2196,12 @@ fn elems_incoming(&self) -> indexed_set::Elems<BD::Idx> {
let univ = self.base_results.sets().bits_per_block();
self.curr_state.elems(univ)
}
fn with_elems_outgoing<F>(&self, f: F) where F: FnOnce(indexed_set::Elems<BD::Idx>) {
let mut curr_state = self.curr_state.clone();
curr_state.union(&self.stmt_gen);
curr_state.subtract(&self.stmt_kill);
let univ = self.base_results.sets().bits_per_block();
f(curr_state.elems(univ));
}
}
......@@ -255,7 +255,7 @@ pub fn into_expr(&mut self,
this.cfg.terminate(block, source_info, TerminatorKind::Call {
func: fun,
args,
cleanup,
cleanup: Some(cleanup),
destination: if diverges {
None
} else {
......@@ -273,7 +273,9 @@ pub fn into_expr(&mut self,
ExprKind::Break { .. } |
ExprKind::InlineAsm { .. } |
ExprKind::Return {.. } => {
this.stmt_expr(block, expr)
unpack!(block = this.stmt_expr(block, expr));
this.cfg.push_assign_unit(block, source_info, destination);
block.unit()
}
// these are the cases that are more naturally handled by some other mode
......
......@@ -315,7 +315,7 @@ pub fn perform_test(&mut self,
}),
args: vec![val, expect],
destination: Some((eq_result.clone(), eq_block)),
cleanup,
cleanup: Some(cleanup),
});
// check the result
......
......@@ -383,7 +383,9 @@ pub fn pop_scope(&mut self,
assert_eq!(scope.region_scope, region_scope.0);
self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
let resume_block = self.resume_block();
unpack!(block = build_scope_drops(&mut self.cfg,
resume_block,
&scope,
&self.scopes,
block,
......@@ -422,6 +424,7 @@ pub fn exit_scope(&mut self,
}
{
let resume_block = self.resume_block();
let mut rest = &mut self.scopes[(len - scope_count)..];
while let Some((scope, rest_)) = {rest}.split_last_mut() {
rest = rest_;
......@@ -441,6 +444,7 @@ pub fn exit_scope(&mut self,
self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
unpack!(block = build_scope_drops(&mut self.cfg,
resume_block,
scope,
rest,
block,
......@@ -468,6 +472,7 @@ pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
let src_info = self.scopes[0].source_info(self.fn_span);
let mut block = self.cfg.start_new_block();
let result = block;
let resume_block = self.resume_block();
let mut rest = &mut self.scopes[..];
while let Some((scope, rest_)) = {rest}.split_last_mut() {
......@@ -491,6 +496,7 @@ pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
unpack!(block = build_scope_drops(&mut self.cfg,
resume_block,
scope,
rest,
block,
......@@ -701,18 +707,31 @@ pub fn schedule_drop(&mut self,
/// This path terminates in Resume. Returns the start of the path.
/// See module comment for more details.
pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
pub fn diverge_cleanup(&mut self) -> BasicBlock {
self.diverge_cleanup_gen(false)
}
fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option<BasicBlock> {
if !self.scopes.iter().any(|scope| scope.needs_cleanup) {
return None;
fn resume_block(&mut self) -> BasicBlock {
if let Some(target) = self.cached_resume_block {
target
} else {
let resumeblk = self.cfg.start_new_cleanup_block();
self.cfg.terminate(resumeblk,
SourceInfo {
scope: ARGUMENT_VISIBILITY_SCOPE,
span: self.fn_span
},
TerminatorKind::Resume);
self.cached_resume_block = Some(resumeblk);
resumeblk
}
}
assert!(!self.scopes.is_empty()); // or `any` above would be false
let Builder { ref mut cfg, ref mut scopes,
ref mut cached_resume_block, .. } = *self;
fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
// To start, create the resume terminator.
let mut target = self.resume_block();
let Builder { ref mut cfg, ref mut scopes, .. } = *self;
// Build up the drops in **reverse** order. The end result will
// look like:
......@@ -725,23 +744,14 @@ fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> Option<BasicBlock> {
// store caches. If everything is cached, we'll just walk right
// to left reading the cached results but never created anything.
// To start, create the resume terminator.
let mut target = if let Some(target) = *cached_resume_block {
target
} else {
let resumeblk = cfg.start_new_cleanup_block();
cfg.terminate(resumeblk,
scopes[0].source_info(self.fn_span),
TerminatorKind::Resume);
*cached_resume_block = Some(resumeblk);
resumeblk
};
if scopes.iter().any(|scope| scope.needs_cleanup) {
for scope in scopes.iter_mut() {
target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
scope, target, generator_drop);
}
Some(target)
}
target
}
/// Utility function for *non*-scope code to build their own drops
......@@ -760,7 +770,7 @@ pub fn build_drop(&mut self,
TerminatorKind::Drop {
location,
target: next_target,
unwind: diverge_target,
unwind: Some(diverge_target),
});
next_target.unit()
}
......@@ -779,7 +789,7 @@ pub fn build_drop_and_replace(&mut self,
location,
value,
target: next_target,
unwind: diverge_target,
unwind: Some(diverge_target),
});
next_target.unit()
}
......@@ -804,7 +814,7 @@ pub fn assert(&mut self, block: BasicBlock,
expected,
msg,
target: success_block,
cleanup,
cleanup: Some(cleanup),
});
success_block
......@@ -813,6 +823,7 @@ pub fn assert(&mut self, block: BasicBlock,
/// Builds drops for pop_scope and exit_scope.
fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
resume_block: BasicBlock,
scope: &Scope<'tcx>,
earlier_scopes: &[Scope<'tcx>],
mut block: BasicBlock,
......@@ -868,7 +879,7 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
cfg.terminate(block, source_info, TerminatorKind::Drop {
location: drop_data.location.clone(),
target: next,
unwind: on_diverge
unwind: Some(on_diverge.unwrap_or(resume_block))
});
block = next;
}
......
......@@ -132,6 +132,10 @@ pub fn location(&self, idx: BorrowIndex) -> &Location {
&self.borrows[idx].location
}
pub fn nonlexical_regioncx(&self) -> Option<&'a RegionInferenceContext<'tcx>> {
self.nonlexical_regioncx
}
/// Returns the span for the "end point" given region. This will
/// return `None` if NLL is enabled, since that concept has no
/// meaning there. Otherwise, return region span if it exists and
......@@ -208,6 +212,12 @@ fn statement_effect(&self,
mir::StatementKind::Assign(_, ref rhs) => {
if let mir::Rvalue::Ref(region, _, ref place) = *rhs {
if is_unsafe_place(self.tcx, self.mir, place) { return; }
if let RegionKind::ReEmpty = region {
// If the borrowed value is dead, the region for it
// can be empty. Don't track the borrow in that case.
return
}
let index = self.location_map.get(&location).unwrap_or_else(|| {
panic!("could not find BorrowIndex for location {:?}", location);
});
......
......@@ -36,6 +36,7 @@
pub mod add_call_guards;
pub mod promote_consts;
pub mod qualify_consts;
pub mod remove_noop_landing_pads;
pub mod dump_mir;
pub mod deaggregator;
pub mod instcombine;
......@@ -226,8 +227,11 @@ fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx
let mut mir = tcx.mir_validated(def_id).steal();
run_passes![tcx, mir, def_id, 2;
// Remove all things not needed by analysis
no_landing_pads::NoLandingPads,
simplify_branches::SimplifyBranches::new("initial"),
remove_noop_landing_pads::RemoveNoopLandingPads,
simplify::SimplifyCfg::new("early-opt"),
// These next passes must be executed together
add_call_guards::CriticalCallEdges,
......@@ -255,6 +259,8 @@ fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx
instcombine::InstCombine,
deaggregator::Deaggregator,
copy_prop::CopyPropagation,
remove_noop_landing_pads::RemoveNoopLandingPads,
simplify::SimplifyCfg::new("final"),
simplify::SimplifyLocals,
generator::StateTransform,
......
......@@ -38,23 +38,8 @@ fn visit_terminator(&mut self,
bb: BasicBlock,
terminator: &mut Terminator<'tcx>,
location: Location) {
match terminator.kind {
TerminatorKind::Goto { .. } |
TerminatorKind::Resume |
TerminatorKind::Return |
TerminatorKind::Unreachable |
TerminatorKind::GeneratorDrop |
TerminatorKind::Yield { .. } |
TerminatorKind::SwitchInt { .. } |
TerminatorKind::FalseEdges { .. } => {
/* nothing to do */
},
TerminatorKind::Call { cleanup: ref mut unwind, .. } |
TerminatorKind::Assert { cleanup: ref mut unwind, .. } |
TerminatorKind::DropAndReplace { ref mut unwind, .. } |
TerminatorKind::Drop { ref mut unwind, .. } => {
if let Some(unwind) = terminator.kind.unwind_mut() {
unwind.take();
},
}
self.super_terminator(bb, terminator, location);
}
......
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::ty::TyCtxt;
use rustc::mir::*;
use rustc_data_structures::bitvec::BitVector;
use rustc_data_structures::indexed_vec::Idx;
use transform::{MirPass, MirSource};
use util::patch::MirPatch;
/// A pass that removes no-op landing pads and replaces jumps to them with
/// `None`. This is important because otherwise LLVM generates terrible
/// code for these.
pub struct RemoveNoopLandingPads;
impl MirPass for RemoveNoopLandingPads {
fn run_pass<'a, 'tcx>(&self,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
_src: MirSource,
mir: &mut Mir<'tcx>) {
if tcx.sess.no_landing_pads() {
return
}
debug!("remove_noop_landing_pads({:?})", mir);
self.remove_nop_landing_pads(mir);
}
}
impl RemoveNoopLandingPads {
fn is_nop_landing_pad(&self, bb: BasicBlock, mir: &Mir, nop_landing_pads: &BitVector)
-> bool
{
for stmt in &mir[bb].statements {
match stmt.kind {
StatementKind::StorageLive(_) |
StatementKind::StorageDead(_) |
StatementKind::EndRegion(_) |
StatementKind::Nop => {
// These are all nops in a landing pad (there's some
// borrowck interaction between EndRegion and storage
// instructions, but this should all run after borrowck).
}
StatementKind::Assign(Place::Local(_), Rvalue::Use(_)) => {
// Writing to a local (e.g. a drop flag) does not
// turn a landing pad to a non-nop
}
StatementKind::Assign(_, _) |
StatementKind::SetDiscriminant { .. } |
StatementKind::InlineAsm { .. } |
StatementKind::Validate { .. } => {
return false;
}
}
}
let terminator = mir[bb].terminator();
match terminator.kind {
TerminatorKind::Goto { .. } |
TerminatorKind::Resume |
TerminatorKind::SwitchInt { .. } |
TerminatorKind::FalseEdges { .. } => {
terminator.successors().iter().all(|succ| {
nop_landing_pads.contains(succ.index())
})
},
TerminatorKind::GeneratorDrop |
TerminatorKind::Yield { .. } |
TerminatorKind::Return |
TerminatorKind::Unreachable |
TerminatorKind::Call { .. } |
TerminatorKind::Assert { .. } |
TerminatorKind::DropAndReplace { .. } |
TerminatorKind::Drop { .. } => {
false
}
}
}
fn remove_nop_landing_pads(&self, mir: &mut Mir) {
// make sure there's a single resume block
let resume_block = {
let patch = MirPatch::new(mir);
let resume_block = patch.resume_block();
patch.apply(mir);
resume_block
};
debug!("remove_noop_landing_pads: resume block is {:?}", resume_block);
let mut jumps_folded = 0;
let mut landing_pads_removed = 0;
let mut nop_landing_pads = BitVector::new(mir.basic_blocks().len());
// This is a post-order traversal, so that if A post-dominates B
// then A will be visited before B.
let postorder: Vec<_> = traversal::postorder(mir).map(|(bb, _)| bb).collect();
for bb in postorder {
debug!(" processing {:?}", bb);
for target in mir[bb].terminator_mut().successors_mut() {
if *target != resume_block && nop_landing_pads.contains(target.index()) {
debug!(" folding noop jump to {:?} to resume block", target);
*target = resume_block;
jumps_folded += 1;
}
}
match mir[bb].terminator_mut().unwind_mut() {
Some(unwind) => {
if *unwind == Some(resume_block) {
debug!(" removing noop landing pad");
jumps_folded -= 1;
landing_pads_removed += 1;
*unwind = None;
}
}
_ => {}
}
let is_nop_landing_pad = self.is_nop_landing_pad(bb, mir, &nop_landing_pads);
if is_nop_landing_pad {
nop_landing_pads.insert(bb.index());
}
debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad);
}
debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed);
}
}
......@@ -124,8 +124,6 @@ pub fn simplify(mut self) {
self.collapse_goto_chain(successor, &mut changed);
}
changed |= self.simplify_unwind(&mut terminator);
let mut new_stmts = vec![];
let mut inner_changed = true;
while inner_changed {
......@@ -238,38 +236,6 @@ fn simplify_branch(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
true
}
// turn an unwind branch to a resume block into a None
fn simplify_unwind(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
let unwind = match terminator.kind {
TerminatorKind::Drop { ref mut unwind, .. } |
TerminatorKind::DropAndReplace { ref mut unwind, .. } |
TerminatorKind::Call { cleanup: ref mut unwind, .. } |
TerminatorKind::Assert { cleanup: ref mut unwind, .. } =>
unwind,
_ => return false
};
if let &mut Some(unwind_block) = unwind {
let is_resume_block = match self.basic_blocks[unwind_block] {
BasicBlockData {
ref statements,
terminator: Some(Terminator {
kind: TerminatorKind::Resume, ..
}), ..
} if statements.is_empty() => true,
_ => false
};
if is_resume_block {
debug!("simplifying unwind to {:?} from {:?}",
unwind_block, terminator.source_info);
*unwind = None;
}
return is_resume_block;
}
false
}
fn strip_nops(&mut self) {
for blk in self.basic_blocks.iter_mut() {
blk.statements.retain(|stmt| if let StatementKind::Nop = stmt.kind {
......
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// revisions: ast mir
//[mir]compile-flags: -Z borrowck=mir
fn cplusplus_mode_exceptionally_unsafe(x: &mut Option<&'static mut isize>) {
let mut z = (0, 0);
*x = Some(&mut z.1); //[ast]~ ERROR [E0597]
//[mir]~^ ERROR [E0597]
panic!("catch me for a dangling pointer!")
}
fn main() {
cplusplus_mode_exceptionally_unsafe(&mut None);
}
......@@ -50,16 +50,16 @@ fn main() {
// StorageLive(_5);
// StorageLive(_6);
// _6 = move _4;
// replace(_5 <- move _6) -> [return: bb1, unwind: bb5];
// replace(_5 <- move _6) -> [return: bb2, unwind: bb5];
// }
// bb1: {
// drop(_6) -> [return: bb6, unwind: bb4];
// resume;
// }
// bb2: {
// resume;
// drop(_6) -> [return: bb6, unwind: bb4];
// }
// bb3: {
// drop(_4) -> bb2;
// drop(_4) -> bb1;
// }
// bb4: {
// drop(_5) -> bb3;
......@@ -74,7 +74,7 @@ fn main() {
// }
// bb7: {
// StorageDead(_5);
// drop(_4) -> bb8;
// drop(_4) -> [return: bb8, unwind: bb1];
// }
// bb8: {
// StorageDead(_4);
......
......@@ -44,20 +44,20 @@ fn drop(&mut self) {
// StorageLive(_1);
// StorageLive(_2);
// _2 = Box(S);
// (*_2) = const S::new() -> [return: bb1, unwind: bb3];
// (*_2) = const S::new() -> [return: bb2, unwind: bb3];
// }
//
// bb1: {
// _1 = move _2;
// drop(_2) -> bb4;
// resume;
// }
//
// bb2: {
// resume;
// _1 = move _2;
// drop(_2) -> bb4;
// }
//
// bb3: {
// drop(_2) -> bb2;
// drop(_2) -> bb1;
// }
//
// bb4: {
......@@ -72,7 +72,7 @@ fn drop(&mut self) {
// }
//
// bb6: {
// drop(_1) -> bb2;
// drop(_1) -> bb1;
// }
//
// bb7: {
......
......@@ -51,9 +51,12 @@ fn foo(i: i32) {
// _3 = &'26_2rs _2;
// StorageLive(_5);
// _5 = (*_3);
// _4 = const foo(move _5) -> [return: bb1, unwind: bb3];
// _4 = const foo(move _5) -> [return: bb2, unwind: bb3];
// }
// bb1: {
// resume;
// }
// bb2: {
// StorageDead(_5);
// StorageLive(_6);
// _6 = &'26_4rs _2;
......@@ -63,14 +66,11 @@ fn foo(i: i32) {
// EndRegion('26_2rs);
// StorageDead(_3);
// StorageDead(_2);
// drop(_1) -> bb4;
// }
// bb2: {
// resume;
// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('26_2rs);
// drop(_1) -> bb2;
// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
......
......@@ -43,20 +43,20 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
// _4 = &'14s _1;
// _3 = [closure@NodeId(18)] { d: move _4 };
// StorageDead(_4);
// _2 = const foo(move _3) -> [return: bb1, unwind: bb3];
// _2 = const foo(move _3) -> [return: bb2, unwind: bb3];
// }
// bb1: {
// resume;
// }
// bb2: {
// EndRegion('14s);
// StorageDead(_3);
// _0 = ();
// drop(_1) -> bb4;
// }
// bb2: {
// resume;
// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('14s);
// drop(_1) -> bb2;
// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
......
......@@ -43,20 +43,20 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
// _4 = &'19s _1;
// _3 = [closure@NodeId(22)] { d: move _4 };
// StorageDead(_4);
// _2 = const foo(move _3) -> [return: bb1, unwind: bb3];
// _2 = const foo(move _3) -> [return: bb2, unwind: bb3];
// }
// bb1: {
// resume;
// }
// bb2: {
// EndRegion('19s);
// StorageDead(_3);
// _0 = ();
// drop(_1) -> bb4;
// }
// bb2: {
// resume;
// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('19s);
// drop(_1) -> bb2;
// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
......
......@@ -63,7 +63,7 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
// bb6: {
// StorageDead(_3);
// _0 = ();
// drop(_1) -> bb7;
// drop(_1) -> [return: bb7, unwind: bb1];
// }
// bb7: {
// StorageDead(_1);
......@@ -88,9 +88,12 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
// StorageDead(_3);
// EndRegion('15_0rs);
// StorageDead(_2);
// drop(_1) -> bb1;
// drop(_1) -> [return: bb2, unwind: bb1];
// }
// bb1: {
// resume;
// }
// bb2: {
// return;
// }
// }
......
......@@ -48,21 +48,21 @@ fn foo<F>(f: F) where F: FnOnce() -> i32 {
// _5 = _2;
// _4 = [closure@NodeId(22)] { r: move _5 };
// StorageDead(_5);
// _3 = const foo(move _4) -> [return: bb1, unwind: bb3];
// _3 = const foo(move _4) -> [return: bb2, unwind: bb3];
// }
// bb1: {
// resume;
// }
// bb2: {
// StorageDead(_4);
// _0 = ();
// EndRegion('21_1rs);
// StorageDead(_2);
// drop(_1) -> bb4;
// }
// bb2: {
// resume;
// drop(_1) -> [return: bb4, unwind: bb1];
// }
// bb3: {
// EndRegion('21_1rs);
// drop(_1) -> bb2;
// drop(_1) -> bb1;
// }
// bb4: {
// StorageDead(_1);
......
......@@ -62,6 +62,7 @@ fn query() -> bool { true }
// let mut _15: std::option::Option<&'35_0rs S<'35_0rs>>;
// let mut _16: &'35_0rs S<'35_0rs>;
// let mut _17: &'35_0rs S<'35_0rs>;
//
// bb0: {
// goto -> bb1;
// }
......@@ -70,9 +71,12 @@ fn query() -> bool { true }
// StorageLive(_3);
// StorageLive(_4);
// _4 = std::option::Option<&'35_0rs S<'35_0rs>>::None;
// _3 = const <std::cell::Cell<T>>::new(move _4) -> bb2;
// _3 = const <std::cell::Cell<T>>::new(move _4) -> [return: bb3, unwind: bb2];
// }
// bb2: {
// resume;
// }
// bb3: {
// StorageDead(_4);
// _2 = S<'35_0rs> { r: move _3 };
// StorageDead(_3);
......@@ -85,27 +89,27 @@ fn query() -> bool { true }
// _8 = &'35_0rs (*_9);
// _7 = std::option::Option<&'35_0rs S<'35_0rs>>::Some(move _8,);
// StorageDead(_8);
// _5 = const <std::cell::Cell<T>>::set(move _6, move _7) -> bb3;
// _5 = const <std::cell::Cell<T>>::set(move _6, move _7) -> [return: bb4, unwind: bb2];
// }
// bb3: {
// bb4: {
// EndRegion('16s);
// StorageDead(_7);
// StorageDead(_6);
// StorageDead(_9);
// StorageLive(_11);
// _11 = const query() -> bb4;
// }
// bb4: {
// switchInt(move _11) -> [0u8: bb6, otherwise: bb5];
// _11 = const query() -> [return: bb5, unwind: bb2];
// }
// bb5: {
// switchInt(move _11) -> [0u8: bb7, otherwise: bb6];
// }
// bb6: {
// _0 = ();
// StorageDead(_11);
// EndRegion('35_0rs);
// StorageDead(_2);
// return;
// }
// bb6: {
// bb7: {
// _10 = ();
// StorageDead(_11);
// StorageLive(_14);
......@@ -117,9 +121,9 @@ fn query() -> bool { true }
// _16 = &'35_0rs (*_17);
// _15 = std::option::Option<&'35_0rs S<'35_0rs>>::Some(move _16,);
// StorageDead(_16);
// _13 = const <std::cell::Cell<T>>::set(move _14, move _15) -> bb7;
// _13 = const <std::cell::Cell<T>>::set(move _14, move _15) -> [return: bb8, unwind: bb2];
// }
// bb7: {
// bb8: {
// EndRegion('33s);
// StorageDead(_15);
// StorageDead(_14);
......
......@@ -99,10 +99,14 @@ fn drop(&mut self) {
// _2 = (_3.0: &'12ds S1);
// _1 = move _2;
// StorageDead(_2);
// drop(_3) -> bb1;
// drop(_3) -> [return: bb2, unwind: bb1];
// }
//
// bb1: {
// resume;
// }
//
// bb2: {
// StorageDead(_3);
// StorageDead(_8);
// StorageDead(_9);
......@@ -146,10 +150,14 @@ fn drop(&mut self) {
// _2 = (_3.0: &'12ds S1);
// _1 = move _2;
// StorageDead(_2);
// drop(_3) -> bb1;
// drop(_3) -> [return: bb2, unwind: bb1];
// }
//
// bb1: {
// resume;
// }
//
// bb2: {
// StorageDead(_3);
// StorageDead(_8);
// StorageDead(_5);
......
......@@ -49,198 +49,207 @@ fn main() {
// END RUST SOURCE
//
// START rustc.full_tested_match.SimplifyBranches-initial.before.mir
// START rustc.full_tested_match.QualifyAndPromoteConstants.after.mir
// bb0: {
// ...
// _2 = std::option::Option<i32>::Some(const 42i32,);
// _5 = discriminant(_2);
// switchInt(move _5) -> [0isize: bb5, 1isize: bb3, otherwise: bb7];
// switchInt(move _5) -> [0isize: bb6, 1isize: bb4, otherwise: bb8];
// }
// bb1: { // arm1
// bb1: {
// resume;
// }
// bb2: { // arm1
// StorageLive(_7);
// _7 = _3;
// _1 = (const 1i32, move _7);
// StorageDead(_7);
// goto -> bb12;
// goto -> bb13;
// }
// bb2: { // binding3(empty) and arm3
// bb3: { // binding3(empty) and arm3
// _1 = (const 3i32, const 3i32);
// goto -> bb12;
// }
// bb3: {
// falseEdges -> [real: bb8, imaginary: bb4]; //pre_binding1
// goto -> bb13;
// }
// bb4: {
// falseEdges -> [real: bb11, imaginary: bb5]; //pre_binding2
// falseEdges -> [real: bb9, imaginary: bb5]; //pre_binding1
// }
// bb5: {
// falseEdges -> [real: bb2, imaginary: bb6]; //pre_binding3
// falseEdges -> [real: bb12, imaginary: bb6]; //pre_binding2
// }
// bb6: {
// unreachable;
// falseEdges -> [real: bb3, imaginary: bb7]; //pre_binding3
// }
// bb7: {
// unreachable;
// }
// bb8: { // binding1 and guard
// bb8: {
// unreachable;
// }
// bb9: { // binding1 and guard
// StorageLive(_3);
// _3 = ((_2 as Some).0: i32);
// StorageLive(_6);
// _6 = const guard() -> bb9;
// _6 = const guard() -> [return: bb10, unwind: bb1];
// }
// bb9: { // end of guard
// switchInt(move _6) -> [0u8: bb10, otherwise: bb1];
// bb10: { // end of guard
// switchInt(move _6) -> [0u8: bb11, otherwise: bb2];
// }
// bb10: { // to pre_binding2
// falseEdges -> [real: bb4, imaginary: bb4];
// bb11: { // to pre_binding2
// falseEdges -> [real: bb5, imaginary: bb5];
// }
// bb11: { // binding2 and arm2
// bb12: { // binding2 and arm2
// StorageLive(_4);
// _4 = ((_2 as Some).0: i32);
// StorageLive(_8);
// _8 = _4;
// _1 = (const 2i32, move _8);
// StorageDead(_8);
// goto -> bb12;
// goto -> bb13;
// }
// bb12: {
// bb13: {
// ...
// return;
// }
// END rustc.full_tested_match.SimplifyBranches-initial.before.mir
// END rustc.full_tested_match.QualifyAndPromoteConstants.after.mir
//
// START rustc.full_tested_match2.SimplifyBranches-initial.before.mir
// START rustc.full_tested_match2.QualifyAndPromoteConstants.before.mir
// bb0: {
// ...
// _2 = std::option::Option<i32>::Some(const 42i32,);
// _5 = discriminant(_2);
// switchInt(move _5) -> [0isize: bb4, 1isize: bb3, otherwise: bb7];
// switchInt(move _5) -> [0isize: bb5, 1isize: bb4, otherwise: bb8];
// }
// bb1: {
// resume;
// }
// bb1: { // arm1
// bb2: { // arm1
// StorageLive(_7);
// _7 = _3;
// _1 = (const 1i32, move _7);
// StorageDead(_7);
// goto -> bb12;
// goto -> bb13;
// }
// bb2: { // binding3(empty) and arm3
// bb3: { // binding3(empty) and arm3
// _1 = (const 3i32, const 3i32);
// goto -> bb12;
// }
// bb3: {
// falseEdges -> [real: bb8, imaginary: bb4]; //pre_binding1
// goto -> bb13;
// }
// bb4: {
// falseEdges -> [real: bb2, imaginary: bb5]; //pre_binding2
// falseEdges -> [real: bb9, imaginary: bb5]; //pre_binding1
// }
// bb5: {
// falseEdges -> [real: bb11, imaginary: bb6]; //pre_binding3
// falseEdges -> [real: bb3, imaginary: bb6]; //pre_binding2
// }
// bb6: {
// unreachable;
// falseEdges -> [real: bb12, imaginary: bb7]; //pre_binding3
// }
// bb7: {
// unreachable;
// }
// bb8: { // binding1 and guard
// bb8: {
// unreachable;
// }
// bb9: { // binding1 and guard
// StorageLive(_3);
// _3 = ((_2 as Some).0: i32);
// StorageLive(_6);
// _6 = const guard() -> bb9;
// _6 = const guard() -> [return: bb10, unwind: bb1];
// }
// bb9: { // end of guard
// switchInt(move _6) -> [0u8: bb10, otherwise: bb1];
// bb10: { // end of guard
// switchInt(move _6) -> [0u8: bb11, otherwise: bb2];
// }
// bb10: { // to pre_binding2
// falseEdges -> [real: bb5, imaginary: bb4];
// bb11: { // to pre_binding2
// falseEdges -> [real: bb6, imaginary: bb5];
// }
// bb11: { // binding2 and arm2
// bb12: { // binding2 and arm2
// StorageLive(_4);
// _4 = ((_2 as Some).0: i32);
// StorageLive(_8);
// _8 = _4;
// _1 = (const 2i32, move _8);
// StorageDead(_8);
// goto -> bb12;
// goto -> bb13;
// }
// bb12: {
// bb13: {
// ...
// return;
// }
// END rustc.full_tested_match2.SimplifyBranches-initial.before.mir
// END rustc.full_tested_match2.QualifyAndPromoteConstants.before.mir
//
// START rustc.main.SimplifyBranches-initial.before.mir
// START rustc.main.QualifyAndPromoteConstants.before.mir
// bb0: {
// ...
// _2 = std::option::Option<i32>::Some(const 1i32,);
// _7 = discriminant(_2);
// switchInt(move _7) -> [1isize: bb3, otherwise: bb4];
// switchInt(move _7) -> [1isize: bb4, otherwise: bb5];
// }
// bb1: { // arm1
// bb1: {
// resume;
// }
// bb2: { // arm1
// _1 = const 1i32;
// goto -> bb16;
// goto -> bb17;
// }
// bb2: { // arm3
// bb3: { // arm3
// _1 = const 3i32;
// goto -> bb16;
// goto -> bb17;
// }
//
// bb3: {
// falseEdges -> [real: bb8, imaginary: bb4]; //pre_binding1
// }
// bb4: {
// falseEdges -> [real: bb11, imaginary: bb5]; //pre_binding2
// falseEdges -> [real: bb9, imaginary: bb5]; //pre_binding1
// }
// bb5: {
// falseEdges -> [real: bb12, imaginary: bb6]; //pre_binding3
// falseEdges -> [real: bb12, imaginary: bb6]; //pre_binding2
// }
// bb6: {
// falseEdges -> [real: bb15, imaginary: bb7]; //pre_binding4
// falseEdges -> [real: bb13, imaginary: bb7]; //pre_binding3
// }
// bb7: {
// falseEdges -> [real: bb16, imaginary: bb8]; //pre_binding4
// }
// bb8: {
// unreachable;
// }
// bb8: { // binding1: Some(w) if guard()
// bb9: { // binding1: Some(w) if guard()
// StorageLive(_3);
// _3 = ((_2 as Some).0: i32);
// StorageLive(_8);
// _8 = const guard() -> bb9;
// _8 = const guard() -> [return: bb10, unwind: bb1];
// }
// bb9: { //end of guard
// switchInt(move _8) -> [0u8: bb10, otherwise: bb1];
// bb10: { //end of guard
// switchInt(move _8) -> [0u8: bb11, otherwise: bb2];
// }
// bb10: { // to pre_binding2
// falseEdges -> [real: bb4, imaginary: bb4];
// bb11: { // to pre_binding2
// falseEdges -> [real: bb5, imaginary: bb5];
// }
// bb11: { // binding2 & arm2
// bb12: { // binding2 & arm2
// StorageLive(_4);
// _4 = _2;
// _1 = const 2i32;
// goto -> bb16;
// goto -> bb17;
// }
// bb12: { // binding3: Some(y) if guard2(y)
// bb13: { // binding3: Some(y) if guard2(y)
// StorageLive(_5);
// _5 = ((_2 as Some).0: i32);
// StorageLive(_10);
// StorageLive(_11);
// _11 = _5;
// _10 = const guard2(move _11) -> bb13;
// _10 = const guard2(move _11) -> [return: bb14, unwind: bb1];
// }
// bb13: { // end of guard2
// bb14: { // end of guard2
// StorageDead(_11);
// switchInt(move _10) -> [0u8: bb14, otherwise: bb2];
// switchInt(move _10) -> [0u8: bb15, otherwise: bb3];
// }
// bb14: { // to pre_binding4
// falseEdges -> [real: bb6, imaginary: bb6];
// bb15: { // to pre_binding4
// falseEdges -> [real: bb7, imaginary: bb7];
// }
// bb15: { // binding4 & arm4
// bb16: { // binding4 & arm4
// StorageLive(_6);
// _6 = _2;
// _1 = const 4i32;
// goto -> bb16;
// goto -> bb17;
// }
// bb16: {
// bb17: {
// ...
// return;
// }
// END rustc.main.SimplifyBranches-initial.before.mir
// END rustc.main.QualifyAndPromoteConstants.before.mir
......@@ -31,15 +31,15 @@ fn main() {
// | Live variables at bb0[0]: []
// StorageLive(_1);
// | Live variables at bb0[1]: []
// _1 = const <std::boxed::Box<T>>::new(const 22usize) -> bb1;
// _1 = const <std::boxed::Box<T>>::new(const 22usize) -> [return: bb2, unwind: bb1];
// }
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
// | Live variables on entry to bb1: [_1 (drop)]
// bb1: {
// | Live variables at bb1[0]: [_1 (drop)]
// | Live variables on entry to bb2: [_1 (drop)]
// bb2: {
// | Live variables at bb2[0]: [_1 (drop)]
// StorageLive(_2);
// | Live variables at bb1[1]: [_1 (drop)]
// _2 = const can_panic() -> [return: bb2, unwind: bb4];
// | Live variables at bb2[1]: [_1 (drop)]
// _2 = const can_panic() -> [return: bb3, unwind: bb4];
// }
// END rustc.main.nll.0.mir
......@@ -25,17 +25,17 @@ fn main() {
// END RUST SOURCE
// START rustc.main.nll.0.mir
// | Live variables on entry to bb1: []
// bb1: {
// | Live variables at bb1[0]: []
// | Live variables on entry to bb2: []
// bb2: {
// | Live variables at bb2[0]: []
// _1 = const 55usize;
// | Live variables at bb1[1]: [_1]
// | Live variables at bb2[1]: [_1]
// StorageLive(_3);
// | Live variables at bb1[2]: [_1]
// | Live variables at bb2[2]: [_1]
// StorageLive(_4);
// | Live variables at bb1[3]: [_1]
// | Live variables at bb2[3]: [_1]
// _4 = _1;
// | Live variables at bb1[4]: [_4]
// _3 = const use_x(move _4) -> bb2;
// | Live variables at bb2[4]: [_4]
// _3 = const use_x(move _4) -> [return: bb3, unwind: bb1];
// }
// END rustc.main.nll.0.mir
......@@ -29,22 +29,20 @@ fn main() {
// END RUST SOURCE
// START rustc.main.nll.0.mir
// | Live variables on entry to bb2: [_1]
// bb2: {
// | Live variables at bb2[0]: [_1]
// | Live variables on entry to bb3: [_1]
// bb3: {
// | Live variables at bb3[0]: [_1]
// StorageLive(_4);
// | Live variables at bb2[1]: [_1]
// | Live variables at bb3[1]: [_1]
// _4 = _1;
// | Live variables at bb2[2]: [_4]
// _3 = const make_live(move _4) -> bb4;
// | Live variables at bb3[2]: [_4]
// _3 = const make_live(move _4) -> [return: bb5, unwind: bb1];
// }
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
// | Live variables on entry to bb3: []
// bb3: {
// | Live variables at bb3[0]: []
// _5 = const make_dead() -> bb5;
// | Live variables on entry to bb4: []
// bb4: {
// | Live variables at bb4[0]: []
// _5 = const make_dead() -> [return: bb6, unwind: bb1];
// }
// END rustc.main.nll.0.mir
......@@ -31,26 +31,26 @@ fn main() {
// END RUST SOURCE
// START rustc.main.nll.0.mir
// | '_#1r: {bb1[1], bb2[0], bb2[1]}
// | '_#2r: {bb1[1], bb2[0], bb2[1]}
// | '_#1r: {bb2[1], bb3[0], bb3[1]}
// | '_#2r: {bb2[1], bb3[0], bb3[1]}
// ...
// let _2: &'_#2r usize;
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
// bb1: {
// | Live variables at bb1[0]: [_1, _3]
// bb2: {
// | Live variables at bb2[0]: [_1, _3]
// _2 = &'_#1r _1[_3];
// | Live variables at bb1[1]: [_2]
// switchInt(const true) -> [0u8: bb3, otherwise: bb2];
// | Live variables at bb2[1]: [_2]
// switchInt(const true) -> [0u8: bb4, otherwise: bb3];
// }
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
// bb2: {
// | Live variables at bb2[0]: [_2]
// bb3: {
// | Live variables at bb3[0]: [_2]
// StorageLive(_7);
// | Live variables at bb2[1]: [_2]
// | Live variables at bb3[1]: [_2]
// _7 = (*_2);
// | Live variables at bb2[2]: [_7]
// _6 = const use_x(move _7) -> bb4;
// | Live variables at bb3[2]: [_7]
// _6 = const use_x(move _7) -> [return: bb5, unwind: bb1];
// }
// END rustc.main.nll.0.mir
......@@ -44,5 +44,5 @@ fn drop(&mut self) { }
// END RUST SOURCE
// START rustc.main.nll.0.mir
// | '_#5r: {bb1[3], bb1[4], bb1[5], bb2[0], bb2[1]}
// | '_#5r: {bb2[3], bb2[4], bb2[5], bb3[0], bb3[1]}
// END rustc.main.nll.0.mir
......@@ -13,7 +13,6 @@
// including) the call to `use_x`. The `else` branch is not included.
// ignore-tidy-linelength
// ignore-test #46267
// compile-flags:-Znll -Zverbose
// ^^^^^^^^^ force compiler to dump more region information
......@@ -47,5 +46,5 @@ fn drop(&mut self) { }
// END RUST SOURCE
// START rustc.main.nll.0.mir
// | '_#5r: {bb1[3], bb1[4], bb1[5], bb2[0], bb2[1], bb2[2], bb3[0], bb4[0], bb4[1], bb4[2], bb6[0], bb7[0], bb7[1], bb8[0]}
// | '_#5r: {bb2[3], bb2[4], bb2[5], bb3[0], bb3[1], bb3[2], bb4[0], bb5[0], bb5[1], bb5[2], bb6[0], bb7[0], bb7[1], bb8[0]}
// END rustc.main.nll.0.mir
......@@ -36,10 +36,10 @@ fn main() {
// END RUST SOURCE
// START rustc.main.nll.0.mir
// | '_#1r: {bb1[1], bb2[0], bb2[1]}
// | '_#1r: {bb2[1], bb3[0], bb3[1]}
// ...
// | '_#3r: {bb7[2], bb7[3], bb7[4]}
// | '_#4r: {bb1[1], bb2[0], bb2[1], bb7[2], bb7[3], bb7[4]}
// | '_#3r: {bb8[2], bb8[3], bb8[4]}
// | '_#4r: {bb2[1], bb3[0], bb3[1], bb8[2], bb8[3], bb8[4]}
// ...
// let mut _2: &'_#4r usize;
// ...
......
......@@ -32,9 +32,9 @@ fn main() {
// END RUST SOURCE
// START rustc.main.nll.0.mir
// | '_#1r: {bb1[1], bb1[2], bb1[3], bb1[4], bb1[5], bb1[6], bb2[0], bb2[1]}
// | '_#2r: {bb1[1], bb1[2], bb1[3], bb1[4], bb1[5], bb1[6], bb2[0], bb2[1]}
// | '_#3r: {bb1[5], bb1[6], bb2[0], bb2[1]}
// | '_#1r: {bb2[1], bb2[2], bb2[3], bb2[4], bb2[5], bb2[6], bb3[0], bb3[1]}
// | '_#2r: {bb2[1], bb2[2], bb2[3], bb2[4], bb2[5], bb2[6], bb3[0], bb3[1]}
// | '_#3r: {bb2[5], bb2[6], bb3[0], bb3[1]}
// END rustc.main.nll.0.mir
// START rustc.main.nll.0.mir
// let _2: &'_#2r usize;
......
......@@ -64,7 +64,7 @@ fn drop(&mut self) {}
// (_1.0: Aligned) = move _4;
// StorageDead(_4);
// _0 = ();
// drop(_1) -> bb2;
// drop(_1) -> [return: bb2, unwind: bb1];
// }
// }
// END rustc.main.EraseRegions.before.mir
......@@ -17,11 +17,11 @@ fn main() {
// END RUST SOURCE
// START rustc.main.SimplifyBranches-initial.before.mir
// bb0: {
// switchInt(const false) -> [0u8: bb2, otherwise: bb1];
// switchInt(const false) -> [0u8: bb3, otherwise: bb2];
// }
// END rustc.main.SimplifyBranches-initial.before.mir
// START rustc.main.SimplifyBranches-initial.after.mir
// bb0: {
// goto -> bb2;
// goto -> bb3;
// }
// END rustc.main.SimplifyBranches-initial.after.mir
......@@ -9,6 +9,8 @@
// except according to those terms.
// ignore-tidy-linelength
// ignore-wasm32-bare unwinding being disabled causes differences in output
// ignore-wasm64-bare unwinding being disabled causes differences in output
// compile-flags: -Z verbose -Z mir-emit-validate=1
fn main() {
......@@ -28,7 +30,7 @@ fn main() {
// StorageDead(_3);
// _0 = ();
// Validate(Release, [_1: std::boxed::Box<[i32]>]);
// drop(_1) -> bb2;
// drop(_1) -> [return: bb2, unwind: bb3];
// }
// ...
// }
......
......@@ -54,6 +54,7 @@ fn main() {
// Validate(Acquire, [_1: &ReFree(DefId(0/1:9 ~ validate_4[317d]::write_42[0]::{{closure}}[0]), BrEnv) [closure@NodeId(22)], _2: *mut i32]);
// Validate(Release, [_1: &ReFree(DefId(0/1:9 ~ validate_4[317d]::write_42[0]::{{closure}}[0]), BrEnv) [closure@NodeId(22)], _2: *mut i32]);
// (*_2) = const 23i32;
// _0 = ();
// return;
// }
// }
......
......@@ -8,6 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z borrowck=compare
static mut DROP: isize = 0;
static mut DROP_S: isize = 0;
......
......@@ -8,6 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -Z borrowck=compare
use std::cell::Cell;
use std::mem::swap;
......