Commit 649c73f9 authored by Paul Daniel Faria


Simplify Cache wrapper to single type, impl Deref on it, fix all compilation errors in librustc_codegen_ssa
Parent c0592faa
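For orientation before the diff, here is a minimal, self-contained sketch of the pattern this commit moves to. The `Body`, `Cache`, and block types below are simplified stand-ins, not the actual rustc definitions: a single generic `BodyCache<T>` pairs a body reference with its predecessor cache and forwards read-only access through `Deref`, replacing the earlier `OwningCache`/`BorrowedCache`/`MutCache` trio.

```rust
use std::ops::{Deref, DerefMut};

// Simplified stand-in for rustc's `Body<'tcx>`: successors per basic block.
struct Body {
    basic_blocks: Vec<Vec<usize>>,
}

// Simplified stand-in for rustc's predecessor `Cache`.
#[derive(Default)]
struct Cache {
    predecessors: Option<Vec<Vec<usize>>>,
}

// One wrapper, generic over `&Body` and `&mut Body`, instead of three types.
struct BodyCache<T> {
    cache: Cache,
    body: T,
}

impl<T> BodyCache<T> {
    fn new(body: T) -> Self {
        Self { cache: Cache::default(), body }
    }
}

impl<'a> BodyCache<&'a Body> {
    // Cached queries take `&mut self` because they may fill the cache lazily.
    fn predecessors_for(&mut self, bb: usize) -> &[usize] {
        let body = self.body;
        let preds = self.cache.predecessors.get_or_insert_with(|| {
            let mut preds = vec![Vec::new(); body.basic_blocks.len()];
            for (from, succs) in body.basic_blocks.iter().enumerate() {
                for &to in succs {
                    preds[to].push(from);
                }
            }
            preds
        });
        &preds[bb]
    }
}

// Deref lets read-only callers keep treating the wrapper like a `Body`.
impl<'a> Deref for BodyCache<&'a Body> {
    type Target = Body;
    fn deref(&self) -> &Body {
        self.body
    }
}

impl<'a> Deref for BodyCache<&'a mut Body> {
    type Target = Body;
    fn deref(&self) -> &Body {
        self.body
    }
}

impl<'a> DerefMut for BodyCache<&'a mut Body> {
    fn deref_mut(&mut self) -> &mut Body {
        // For simplicity this sketch invalidates on any mutable access; the
        // actual change invalidates in the `IndexMut` impl instead.
        self.cache.predecessors = None;
        self.body
    }
}

fn main() {
    let body = Body { basic_blocks: vec![vec![1], vec![]] };
    let mut cache = BodyCache::new(&body);
    assert_eq!(cache.predecessors_for(1), &[0]);
    assert_eq!(cache.basic_blocks.len(), 2); // field access via Deref
}
```

The point of the `Deref` impls is that existing call sites which only read the body (indexing, `local_decls`, `span`, graph traversal) keep compiling against the wrapper unchanged, while cache-filling queries and mutation go through the wrapper's own methods.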
......@@ -6,7 +6,7 @@
use rustc_data_structures::graph::{self, GraphPredecessors, GraphSuccessors};
use rustc_data_structures::graph::dominators::{dominators, Dominators};
use std::iter;
use std::ops::{Index, IndexMut};
use std::ops::{Deref, DerefMut, Index, IndexMut};
use std::vec::IntoIter;
#[derive(Clone, Debug)]
......@@ -111,33 +111,21 @@ pub fn basic_blocks_and_local_decls_mut<'a, 'tcx>(
}
}
pub struct OwningCache<'tcx> {
pub struct BodyCache<T> {
cache: Cache,
body: Body<'tcx>,
body: T,
}
impl<'tcx> OwningCache<'tcx> {
pub fn borrow(&mut self) -> BorrowedCache<'_, 'tcx> {
BorrowedCache {
cache: &mut self.cache,
body: &self.body,
}
}
pub fn borrow_mut(&mut self) -> MutCache<'_, 'tcx> {
MutCache {
cache: &mut self.cache,
body: &mut self.body,
impl<T> BodyCache<T> {
pub fn new(body: T) -> Self {
Self {
cache: Cache::new(),
body
}
}
}
pub struct BorrowedCache<'a, 'tcx> {
cache: &'a mut Cache,
body: &'a Body<'tcx>
}
impl<'a, 'tcx> BorrowedCache<'a, 'tcx> {
impl<'a, 'tcx> BodyCache<&'a Body<'tcx>> {
#[inline]
pub fn predecessors_for(&mut self, bb: BasicBlock) -> &[BasicBlock] {
self.cache.predecessors_for(bb, self.body)
......@@ -159,7 +147,14 @@ pub fn dominators(&mut self) -> Dominators<BasicBlock> {
}
}
impl<'a, 'tcx> Index<BasicBlock> for BorrowedCache<'a, 'tcx> {
impl<'a, 'tcx> Deref for BodyCache<&'a Body<'tcx>> {
type Target = Body<'tcx>;
fn deref(&self) -> &Body<'tcx> {
self.body
}
}
impl<'a, 'tcx> Index<BasicBlock> for BodyCache<&'a Body<'tcx>> {
type Output = BasicBlockData<'tcx>;
#[inline]
......@@ -168,16 +163,16 @@ fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
}
}
impl<'a, 'tcx> graph::DirectedGraph for BorrowedCache<'a, 'tcx> {
impl<'a, 'tcx> graph::DirectedGraph for BodyCache<&'a Body<'tcx>> {
type Node = BasicBlock;
}
impl<'a, 'graph, 'tcx> graph::GraphPredecessors<'graph> for BorrowedCache<'a, 'tcx> {
impl<'a, 'graph, 'tcx> graph::GraphPredecessors<'graph> for BodyCache<&'a Body<'tcx>> {
type Item = BasicBlock;
type Iter = IntoIter<BasicBlock>;
}
impl<'a, 'tcx> graph::WithPredecessors for BorrowedCache<'a, 'tcx> {
impl<'a, 'tcx> graph::WithPredecessors for BodyCache<&'a Body<'tcx>> {
fn predecessors(
&mut self,
node: Self::Node,
......@@ -186,19 +181,19 @@ fn predecessors(
}
}
impl<'a, 'tcx> graph::WithNumNodes for BorrowedCache<'a, 'tcx> {
impl<'a, 'tcx> graph::WithNumNodes for BodyCache<&'a Body<'tcx>> {
fn num_nodes(&self) -> usize {
self.body.num_nodes()
}
}
impl<'a, 'tcx> graph::WithStartNode for BorrowedCache<'a, 'tcx> {
impl<'a, 'tcx> graph::WithStartNode for BodyCache<&'a Body<'tcx>> {
fn start_node(&self) -> Self::Node {
self.body.start_node()
}
}
impl<'a, 'tcx> graph::WithSuccessors for BorrowedCache<'a, 'tcx> {
impl<'a, 'tcx> graph::WithSuccessors for BodyCache<&'a Body<'tcx>> {
fn successors(
&self,
node: Self::Node,
......@@ -207,17 +202,12 @@ fn successors(
}
}
impl<'a, 'b, 'tcx> graph::GraphSuccessors<'b> for BorrowedCache<'a, 'tcx> {
impl<'a, 'b, 'tcx> graph::GraphSuccessors<'b> for BodyCache<&'a Body<'tcx>> {
type Item = BasicBlock;
type Iter = iter::Cloned<Successors<'b>>;
}
pub struct MutCache<'a, 'tcx> {
cache: &'a mut Cache,
body: &'a mut Body<'tcx>,
}
impl<'a, 'tcx> MutCache<'a, 'tcx> {
impl<'a, 'tcx> BodyCache<&'a mut Body<'tcx>> {
#[inline]
pub fn body(&mut self) -> &mut Body<'tcx> {
self.body
......@@ -234,7 +224,21 @@ pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'
}
}
impl<'a, 'tcx> Index<BasicBlock> for MutCache<'a, 'tcx> {
impl<'a, 'tcx> Deref for BodyCache<&'a mut Body<'tcx>> {
type Target = Body<'tcx>;
fn deref(&self) -> &Body<'tcx> {
self.body
}
}
impl<'a, 'tcx> DerefMut for BodyCache<&'a mut Body<'tcx>> {
fn deref_mut(&mut self) -> &mut Body<'tcx> {
self.body
}
}
impl<'a, 'tcx> Index<BasicBlock> for BodyCache<&'a mut Body<'tcx>> {
type Output = BasicBlockData<'tcx>;
#[inline]
......@@ -243,13 +247,9 @@ fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
}
}
impl<'a, 'tcx> IndexMut<BasicBlock> for MutCache<'a, 'tcx> {
impl<'a, 'tcx> IndexMut<BasicBlock> for BodyCache<&'a mut Body<'tcx>> {
fn index_mut(&mut self, index: BasicBlock) -> &mut Self::Output {
self.cache.invalidate_predecessors();
&mut self.body.basic_blocks[index]
}
}
//CloneTypeFoldableAndLiftImpls! {
// Cache,
//}
......@@ -38,6 +38,7 @@
use syntax_pos::{Span, DUMMY_SP};
pub use crate::mir::interpret::AssertMessage;
pub use crate::mir::cache::BodyCache;
pub mod cache;
pub mod interpret;
......@@ -2596,7 +2597,11 @@ pub fn successor_within_block(&self) -> Location {
}
/// Returns `true` if `other` is earlier in the control flow graph than `self`.
pub fn is_predecessor_of<'tcx>(&self, other: Location, mut body_cache: cache::BorrowedCache<'_, 'tcx>) -> bool {
pub fn is_predecessor_of<'tcx>(
&self,
other: Location,
mut body_cache: BodyCache<&'_ Body<'tcx>>
) -> bool {
// If we are in the same block as the other location and are an earlier statement
// then we are a predecessor of `other`.
if self.block == other.block && self.statement_index < other.statement_index {
......
use crate::ty::subst::SubstsRef;
use crate::ty::{CanonicalUserTypeAnnotation, Ty};
use crate::mir::*;
use crate::mir::cache::*;
use syntax_pos::Span;
// # The MIR Visitor
......@@ -72,7 +71,10 @@ pub trait $visitor_trait_name<'tcx> {
// Override these, and call `self.super_xxx` to revert back to the
// default behavior.
fn visit_body(&mut self, body_cache: & $($mutability)? cache_type!('tcx $($mutability)?)) {
fn visit_body(
&mut self,
body_cache: & $($mutability)? BodyCache<&'_ $($mutability)? Body<'tcx>>
) {
self.super_body(body_cache);
}
......@@ -241,8 +243,10 @@ fn visit_source_scope(&mut self,
// The `super_xxx` methods comprise the default behavior and are
// not meant to be overridden.
fn super_body(&mut self,
body_cache: & $($mutability)? cache_type!('tcx $($mutability)?)) {
fn super_body(
&mut self,
body_cache: & $($mutability)? BodyCache<&'_ $($mutability)? Body<'tcx>>
) {
let span = body_cache.body().span;
if let Some(yield_ty) = &$($mutability)? body_cache.body().yield_ty {
self.visit_ty(yield_ty, TyContext::YieldTy(SourceInfo {
......@@ -793,7 +797,11 @@ fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) {
// Convenience methods
fn visit_location(&mut self, body_cache: & $($mutability)? cache_type!('tcx $($mutability)?), location: Location) {
fn visit_location(
&mut self,
body_cache: & $($mutability)? BodyCache<&'_ $($mutability)? Body<'tcx>>,
location: Location
) {
let basic_block = & $($mutability)? body_cache[location.block];
if basic_block.statements.len() == location.statement_index {
if let Some(ref $($mutability)? terminator) = basic_block.terminator {
......@@ -809,11 +817,6 @@ fn visit_location(&mut self, body_cache: & $($mutability)? cache_type!('tcx $($m
}
}
macro_rules! cache_type {
($tcx:lifetime mut) => {MutCache<'_, $tcx>};
($tcx:lifetime) => {BorrowedCache<'_, $tcx>};
}
macro_rules! visit_place_fns {
(mut) => (
fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
......
......@@ -31,6 +31,7 @@
use rustc::middle::cstore::EncodedMetadata;
use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::weak_lang_items;
use rustc::mir::BodyCache;
use rustc::mir::mono::{CodegenUnitNameBuilder, CodegenUnit, MonoItem};
use rustc::ty::{self, Ty, TyCtxt, Instance};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
......@@ -374,7 +375,9 @@ pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let lldecl = cx.get_fn(instance);
let mir = cx.tcx().instance_mir(instance.def);
mir::codegen_mir::<Bx>(cx, lldecl, &mir, instance, sig);
// TODO(nashenas88) move this into instance_mir before merging PR
let mut mir = BodyCache::new(mir);
mir::codegen_mir::<Bx>(cx, lldecl, &mut mir, instance, sig);
}
/// Creates the `main` function which will initialize the rust runtime and call
......
......@@ -4,7 +4,7 @@
use rustc_index::bit_set::BitSet;
use rustc_data_structures::graph::dominators::Dominators;
use rustc_index::vec::{Idx, IndexVec};
use rustc::mir::{self, Location, TerminatorKind};
use rustc::mir::{self, Body, BodyCache, Location, TerminatorKind};
use rustc::mir::visit::{
Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext, NonUseContext,
};
......@@ -17,10 +17,10 @@
use crate::traits::*;
pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
fx: &FunctionCx<'a, 'tcx, Bx>,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
) -> BitSet<mir::Local> {
let mir = fx.mir;
let mut analyzer = LocalAnalyzer::new(fx);
let mir = fx.mir.take().unwrap();
let mut analyzer = LocalAnalyzer::new(fx, mir);
analyzer.visit_body(mir);
......@@ -54,11 +54,14 @@ pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}
analyzer.non_ssa_locals
let (mir, non_ssa_locals) = analyzer.finalize();
fx.mir = Some(mir);
non_ssa_locals
}
struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
fx: &'mir FunctionCx<'a, 'tcx, Bx>,
mir: &'a mut BodyCache<&'a Body<'tcx>>,
dominators: Dominators<mir::BasicBlock>,
non_ssa_locals: BitSet<mir::Local>,
// The location of the first visited direct assignment to each
......@@ -67,27 +70,32 @@ struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
}
impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self {
fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>, mir: &'a mut BodyCache<&'a Body<'tcx>>) -> Self {
let invalid_location =
mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location();
mir::BasicBlock::new(mir.basic_blocks().len()).start_location();
let mut analyzer = LocalAnalyzer {
fx,
dominators: fx.mir.dominators(),
non_ssa_locals: BitSet::new_empty(fx.mir.local_decls.len()),
first_assignment: IndexVec::from_elem(invalid_location, &fx.mir.local_decls)
dominators: mir.dominators(),
mir,
non_ssa_locals: BitSet::new_empty(mir.local_decls.len()),
first_assignment: IndexVec::from_elem(invalid_location, &mir.local_decls)
};
// Arguments get assigned to by means of the function being called
for arg in fx.mir.args_iter() {
for arg in mir.args_iter() {
analyzer.first_assignment[arg] = mir::START_BLOCK.start_location();
}
analyzer
}
fn finalize(self) -> (&'a mut BodyCache<&'a Body<'tcx>>, BitSet<mir::Local>) {
(self.mir, self.non_ssa_locals)
}
fn first_assignment(&self, local: mir::Local) -> Option<Location> {
let location = self.first_assignment[local];
if location.block.index() < self.fx.mir.basic_blocks().len() {
if location.block.index() < self.mir.basic_blocks().len() {
Some(location)
} else {
None
......@@ -130,7 +138,7 @@ fn process_place(
};
if is_consume {
let base_ty =
mir::Place::ty_from(place_ref.base, proj_base, self.fx.mir, cx.tcx());
mir::Place::ty_from(place_ref.base, proj_base, self.mir.body(), cx.tcx());
let base_ty = self.fx.monomorphize(&base_ty);
// ZSTs don't require any actual memory access.
......@@ -139,7 +147,7 @@ fn process_place(
.ty;
let elem_ty = self.fx.monomorphize(&elem_ty);
let span = if let mir::PlaceBase::Local(index) = place_ref.base {
self.fx.mir.local_decls[*index].source_info.span
self.mir.local_decls[*index].source_info.span
} else {
DUMMY_SP
};
......@@ -243,7 +251,7 @@ fn visit_assign(&mut self,
if let Some(index) = place.as_local() {
self.assign(index, location);
let decl_span = self.fx.mir.local_decls[index].source_info.span;
let decl_span = self.mir.local_decls[index].source_info.span;
if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
self.not_ssa(index);
}
......@@ -348,7 +356,7 @@ fn visit_local(&mut self,
}
PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
let ty = self.fx.mir.local_decls[local].ty;
let ty = self.mir.local_decls[local].ty;
let ty = self.fx.monomorphize(&ty);
// Only need the place if we're actually dropping it.
......
......@@ -132,7 +132,7 @@ fn do_call<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
} else {
let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
bx.apply_attrs_callsite(&fn_abi, llret);
if fx.mir[*self.bb].is_cleanup {
if fx.mir.unwrap()[*self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
// struct, there are "symmetry" issues that cause
......@@ -324,7 +324,7 @@ fn codegen_drop_terminator<'b>(
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
) {
let ty = location.ty(self.mir, bx.tcx()).ty;
let ty = location.ty(self.mir.unwrap().body(), bx.tcx()).ty;
let ty = self.monomorphize(&ty);
let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
......@@ -510,7 +510,7 @@ fn codegen_call_terminator<'b>(
let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(self.mir, bx.tcx());
let op_ty = op_arg.ty(self.mir.unwrap().body(), bx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
......@@ -791,7 +791,7 @@ pub fn codegen_block(
bb: mir::BasicBlock,
) {
let mut bx = self.build_block(bb);
let data = &self.mir[bb];
let data = &self.mir.unwrap()[bb];
debug!("codegen_block({:?}={:?})", bb, data);
......@@ -1053,7 +1053,7 @@ fn landing_pad_uncached(
target_bb: Bx::BasicBlock
) -> Bx::BasicBlock {
if base::wants_msvc_seh(self.cx.sess()) {
span_bug!(self.mir.span, "landing pad was not inserted?")
span_bug!(self.mir.unwrap().span, "landing pad was not inserted?")
}
let mut bx = self.new_block("cleanup");
......@@ -1154,7 +1154,7 @@ fn make_return_dest(
//
// If someone changes that, please update this code path
// to create a temporary.
span_bug!(self.mir.span, "can't directly store to unaligned value");
span_bug!(self.mir.unwrap().span, "can't directly store to unaligned value");
}
llargs.push(dest.llval);
ReturnDest::Nothing
......
use rustc::ty::{self, Ty, TypeFoldable, Instance};
use rustc::ty::layout::{TyLayout, HasTyCtxt, FnAbiExt};
use rustc::mir::{self, Body};
use rustc::mir::{self, Body, BodyCache};
use rustc_target::abi::call::{FnAbi, PassMode};
use crate::base;
use crate::traits::*;
......@@ -21,7 +21,7 @@
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
instance: Instance<'tcx>,
mir: &'a mir::Body<'tcx>,
mir: Option<&'a mut BodyCache<&'a mir::Body<'tcx>>>,
debug_context: Option<FunctionDebugContext<Bx::DIScope>>,
......@@ -122,7 +122,7 @@ fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
llfn: Bx::Function,
mir: &'a Body<'tcx>,
mir: &'a mut BodyCache<&'a Body<'tcx>>,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
) {
......@@ -159,7 +159,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let mut fx = FunctionCx {
instance,
mir,
mir: Some(mir),
llfn,
fn_abi,
cx,
......@@ -174,7 +174,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
per_local_var_debug_info: debuginfo::per_local_var_debug_info(cx.tcx(), mir),
};
let memory_locals = analyze::non_ssa_locals(&fx);
let memory_locals = analyze::non_ssa_locals(&mut fx);
// Allocate variable and temp allocas
fx.locals = {
......@@ -327,10 +327,10 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let mut idx = 0;
let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
mir.args_iter().enumerate().map(|(arg_index, local)| {
let arg_decl = &mir.local_decls[local];
mir.unwrap().args_iter().enumerate().map(|(arg_index, local)| {
let arg_decl = &mir.unwrap().local_decls[local];
if Some(local) == mir.spread_arg {
if Some(local) == mir.unwrap().spread_arg {
// This argument (e.g., the last argument in the "rust-call" ABI)
// is a tuple that was spread at the ABI level and now we have
// to reconstruct it into a tuple local variable, from multiple
......
......@@ -591,7 +591,7 @@ pub fn codegen_place(
pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx();
let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, self.mir, tcx);
let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, self.mir.unwrap().body(), tcx);
self.monomorphize(&place_ty.ty)
}
}
......@@ -460,7 +460,7 @@ pub fn codegen_rvalue_operand(
}
mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
let discr_ty = rvalue.ty(self.mir.unwrap().body(), bx.tcx());
let discr = self.codegen_place(&mut bx, &place.as_ref())
.codegen_get_discr(&mut bx, discr_ty);
(bx, OperandRef {
......@@ -513,7 +513,7 @@ pub fn codegen_rvalue_operand(
mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = rvalue.ty(self.mir.unwrap().body(), self.cx.tcx());
let operand = OperandRef::new_zst(
&mut bx,
self.cx.layout_of(self.monomorphize(&ty)),
......@@ -710,7 +710,7 @@ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) ->
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = rvalue.ty(self.mir.unwrap().body(), self.cx.tcx());
let ty = self.monomorphize(&ty);
self.cx.spanned_layout_of(ty, span).is_zst()
}
......
......@@ -9,8 +9,8 @@
use rustc::lint::builtin::{MUTABLE_BORROW_RESERVATION_CONFLICT};
use rustc::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
use rustc::mir::{
ClearCrossCrate, Local, Location, Body, Mutability, Operand, Place, PlaceBase, PlaceElem,
PlaceRef, Static, StaticKind
ClearCrossCrate, Local, Location, Body, BodyCache, Mutability, Operand, Place, PlaceBase,
PlaceElem, PlaceRef, Static, StaticKind
};
use rustc::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
use rustc::mir::{Terminator, TerminatorKind};
......@@ -167,16 +167,12 @@ fn do_mir_borrowck<'a, 'tcx>(
let free_regions =
nll::replace_regions_in_mir(infcx, def_id, param_env, &mut body, &mut promoted);
// Region replacement above very likely invalidated the predecessors cache. It's used later on
// when retrieving the dominators from the body, so we need to ensure it exists before locking
// the body for changes.
body.ensure_predecessors();
let body = &body; // no further changes
let location_table = &LocationTable::new(body);
let body_cache = &BodyCache::new(&body); // no further changes
let location_table = &LocationTable::new(body_cache);
let mut errors_buffer = Vec::new();
let (move_data, move_errors): (MoveData<'tcx>, Option<Vec<(Place<'tcx>, MoveError<'tcx>)>>) =
match MoveData::gather_moves(body, tcx) {
match MoveData::gather_moves(body_cache, tcx) {
Ok(move_data) => (move_data, None),
Err((move_data, move_errors)) => (move_data, Some(move_errors)),
};
......@@ -186,27 +182,27 @@ fn do_mir_borrowck<'a, 'tcx>(
param_env,
};
let dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
let dead_unwinds = BitSet::new_empty(body_cache.basic_blocks().len());
let mut flow_inits = FlowAtLocation::new(do_dataflow(
tcx,
body,
body_cache,
def_id,
&attributes,
&dead_unwinds,
MaybeInitializedPlaces::new(tcx, body, &mdpe),
MaybeInitializedPlaces::new(tcx, body_cache, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let locals_are_invalidated_at_exit = tcx.hir().body_owner_kind(id).is_fn_or_closure();
let borrow_set = Rc::new(BorrowSet::build(
tcx, body, locals_are_invalidated_at_exit, &mdpe.move_data));
tcx, body_cache, locals_are_invalidated_at_exit, &mdpe.move_data));
// If we are in non-lexical mode, compute the non-lexical lifetimes.
let (regioncx, polonius_output, opt_closure_req) = nll::compute_regions(
infcx,
def_id,
free_regions,
body,
body_cache,
&promoted,
&local_names,
&upvars,
......@@ -227,29 +223,29 @@ fn do_mir_borrowck<'a, 'tcx>(
let flow_borrows = FlowAtLocation::new(do_dataflow(
tcx,
body,
body_cache,
def_id,
&attributes,
&dead_unwinds,
Borrows::new(tcx, body, param_env, regioncx.clone(), &borrow_set),
Borrows::new(tcx, body_cache, param_env, regioncx.clone(), &borrow_set),
|rs, i| DebugFormatted::new(&rs.location(i)),
));
let flow_uninits = FlowAtLocation::new(do_dataflow(
tcx,
body,
body_cache,
def_id,
&attributes,
&dead_unwinds,
MaybeUninitializedPlaces::new(tcx, body, &mdpe),
MaybeUninitializedPlaces::new(tcx, body_cache, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]),
));
let flow_ever_inits = FlowAtLocation::new(do_dataflow(
tcx,
body,
body_cache,
def_id,
&attributes,
&dead_unwinds,
EverInitializedPlaces::new(tcx, body, &mdpe),
EverInitializedPlaces::new(tcx, body_cache, &mdpe),
|bd, i| DebugFormatted::new(&bd.move_data().inits[i]),
));
......@@ -261,11 +257,11 @@ fn do_mir_borrowck<'a, 'tcx>(
_ => true,
};
let dominators = body.dominators();
let dominators = body_cache.dominators();
let mut mbcx = MirBorrowckCtxt {
infcx,
body,
body_cache,
mir_def_id: def_id,
param_env,
move_data: &mdpe.move_data,
......@@ -403,7 +399,7 @@ fn do_mir_borrowck<'a, 'tcx>(
crate struct MirBorrowckCtxt<'cx, 'tcx> {
crate infcx: &'cx InferCtxt<'cx, 'tcx>,
body: &'cx Body<'tcx>,
body_cache: BodyCache<&'cx Body<'tcx>>,
mir_def_id: DefId,
param_env: ty::ParamEnv<'tcx>,
move_data: &'cx MoveData<'tcx>,
......@@ -494,7 +490,7 @@ impl<'cx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'tcx
type FlowState = Flows<'cx, 'tcx>;
fn body(&self) -> &'cx Body<'tcx> {
self.body
self.body_cache
}
fn visit_block_entry(&mut self, bb: BasicBlock, flow_state: &Self::FlowState) {
......@@ -644,7 +640,7 @@ fn visit_terminator_entry(
let tcx = self.infcx.tcx;
// Compute the type with accurate region information.
let drop_place_ty = drop_place.ty(self.body, self.infcx.tcx);
let drop_place_ty = drop_place.ty(self.body_cache, self.infcx.tcx);
// Erase the regions.
let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty;
......@@ -988,7 +984,7 @@ fn check_access_for_conflict(
let mut error_reported = false;
let tcx = self.infcx.tcx;
let body = self.body;
let body = self.body_cache;
let param_env = self.param_env;
let location_table = self.location_table.start_index(location);
let borrow_set = self.borrow_set.clone();
......
......@@ -385,15 +385,15 @@ fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
}
}
fn visit_body(&mut self, body: &Body<'tcx>) {
self.sanitize_type(&"return type", body.return_ty());
for local_decl in &body.local_decls {
fn visit_body(&mut self, body_cache: &BodyCache<&'_ Body<'tcx>>) {
self.sanitize_type(&"return type", body_cache.return_ty());
for local_decl in &body_cache.local_decls {
self.sanitize_type(local_decl, local_decl.ty);
}
if self.errors_reported {
return;
}
self.super_body(body);
self.super_body(body_cache);
}
}
......
......@@ -123,17 +123,6 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx
&add_call_guards::CriticalCallEdges,
]);
// The `ensure_predecessors_cache::EnsurePredecessorsCache` MirPass wasn't used in the
// `run_passes` above because the above pass is not always guaranteed to run. There can be
// instances where, e.g. a `MirPhase::Validated` pass has already been run on a `Body` by the
// time it arrived at this line, and so the above `run_passes` call will NOT run any of the
// passes (They do not run if a same or later pass has already been executed on a `Body`).
// Adding the ensure pass during the `run_passes` for `MirPhase::Validated` would not
// help because the predecessors cache would be invalidated between that pass and this call.
// Having the single ensure outside of the `run_passes` list here guarantees that anyone
// using this `Body` could call `Body::unwrap_predecessors()` without worrying about a panic.
result.ensure_predecessors();
debug!("make_shim({:?}) = {:?}", instance, result);
tcx.arena.alloc(result)
......@@ -926,7 +915,6 @@ pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> &Body<'_> {
sig.inputs().len(),
span,
);
body.ensure_predecessors();
crate::util::dump_mir(
tcx,
......
use rustc::mir::Body;
use rustc::ty::TyCtxt;
use crate::transform::{MirPass, MirSource};
pub struct EnsurePredecessorsCache {
label: String,
}
impl EnsurePredecessorsCache {
pub fn new<S: Into<String>>(label: S) -> Self {
Self {
label: label.into(),
}
}
}
impl<'tcx> MirPass<'tcx> for EnsurePredecessorsCache {
fn run_pass(&self, _: TyCtxt<'tcx>, _: MirSource<'tcx>, body: &mut Body<'tcx>) {
debug!("{}: Ensure predecessors cache: {:?}", self.label, body.span.data());
// predecessors is lazily calculated. We want to ensure that the cache is properly filled
// before the next stages of compilation, since these following stages will only be allowed
// to read the cache and not generate it. If the cache is already up to date, this line is
// a nop.
body.ensure_predecessors();
}
}
......@@ -1203,10 +1203,6 @@ fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'
// RETURN_PLACE then is a fresh unused local with type ret_ty.
let new_ret_local = replace_result_variable(ret_ty, body, tcx);
// Replacing result variables very likely clears the predecessors cache (needed inside of
// compute layout), so we need to ensure the cache exists.
body.ensure_predecessors();
// Extract locals which are live across suspension point into `layout`
// `remap` gives a mapping from local indices onto generator struct indices
// `storage_liveness` tells us which locals have live storage at suspension points
......
......@@ -20,7 +20,6 @@
pub mod simplify_branches;
pub mod simplify_try;
pub mod simplify;
pub mod ensure_predecessors_cache;
pub mod erase_regions;
pub mod no_landing_pads;
pub mod rustc_peek;
......@@ -251,7 +250,6 @@ fn mir_validated(
&simplify::SimplifyCfg::new("qualify-consts"),
]);
body.ensure_predecessors();
let promoted = promote_pass.promoted_fragments.into_inner();
(tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
}
......@@ -316,7 +314,6 @@ fn run_optimization_passes<'tcx>(
&simplify::SimplifyLocals,
&add_call_guards::CriticalCallEdges,
&ensure_predecessors_cache::EnsurePredecessorsCache::new("before-opt-dump"),
&dump_mir::Marker("PreCodegen"),
]);
}
......