提交 9228d231 编写于 作者: B bors

Auto merge of #43028 - michaelwoerister:dedup-dep-nodes, r=nikomatsakis

incr.comp.: Deduplicate some DepNodes and introduce anonymous DepNodes

This is a parallel PR to the pending https://github.com/rust-lang/rust/pull/42769. It implements most of what is possible in terms of DepNode re-opening without having anonymous DepNodes yet (https://github.com/rust-lang/rust/issues/42298).

r? @nikomatsakis
......@@ -16,7 +16,7 @@ The nodes of the graph are defined by the enum `DepNode`. They represent
one of three things:
1. HIR nodes (like `Hir(DefId)`) represent the HIR input itself.
2. Data nodes (like `ItemSignature(DefId)`) represent some computed
2. Data nodes (like `TypeOfItem(DefId)`) represent some computed
information about a particular item.
3. Procedure nodes (like `CoherenceCheckTrait(DefId)`) represent some
procedure that is executing. Usually this procedure is
......@@ -289,7 +289,7 @@ to see something like:
Hir(foo) -> Collect(bar)
Collect(bar) -> TypeckTables(bar)
That first edge looks suspicious to you. So you set
`RUST_FORBID_DEP_GRAPH_EDGE` to `Hir&foo -> Collect&bar`, re-run, and
then observe the backtrace. Voila, bug fixed!
......@@ -64,7 +64,9 @@
use hir::map::DefPathHash;
use ich::Fingerprint;
use ty::TyCtxt;
use ty::{TyCtxt, Instance, InstanceDef};
use ty::fast_reject::SimplifiedType;
use ty::subst::Substs;
use rustc_data_structures::stable_hasher::{StableHasher, HashStable};
use ich::StableHashingContext;
use std::fmt;
......@@ -77,8 +79,14 @@
($x:tt) => ({})
}
macro_rules! anon_attr_to_bool {
(anon) => (true)
}
macro_rules! define_dep_nodes {
($(
(<$tcx:tt>
$(
[$($anon:ident)*]
$variant:ident $(( $($tuple_arg:tt),* ))*
$({ $($struct_arg_name:ident : $struct_arg_ty:ty),* })*
,)*
......@@ -92,7 +100,7 @@ pub enum DepKind {
impl DepKind {
#[allow(unreachable_code)]
#[inline]
pub fn can_reconstruct_query_key(&self) -> bool {
pub fn can_reconstruct_query_key<$tcx>(&self) -> bool {
match *self {
$(
DepKind :: $variant => {
......@@ -114,6 +122,19 @@ pub fn can_reconstruct_query_key(&self) -> bool {
}
}
#[allow(unreachable_code)]
#[inline]
pub fn is_anon<$tcx>(&self) -> bool {
match *self {
$(
DepKind :: $variant => {
$(return anon_attr_to_bool!($anon);)*
false
}
)*
}
}
#[allow(unreachable_code)]
#[inline]
pub fn has_params(&self) -> bool {
......@@ -139,7 +160,7 @@ pub fn has_params(&self) -> bool {
}
}
pub enum DepConstructor {
pub enum DepConstructor<$tcx> {
$(
$variant $(( $($tuple_arg),* ))*
$({ $($struct_arg_name : $struct_arg_ty),* })*
......@@ -155,7 +176,12 @@ pub struct DepNode {
impl DepNode {
#[allow(unreachable_code, non_snake_case)]
pub fn new(tcx: TyCtxt, dep: DepConstructor) -> DepNode {
pub fn new<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
dep: DepConstructor<'gcx>)
-> DepNode
where 'gcx: 'a + 'tcx,
'tcx: 'a
{
match dep {
$(
DepConstructor :: $variant $(( $($tuple_arg),* ))*
......@@ -336,7 +362,7 @@ pub fn to_dep_node(self, tcx: TyCtxt, kind: DepKind) -> DepNode {
}
}
define_dep_nodes!(
define_dep_nodes!( <'tcx>
// Represents the `Krate` as a whole (the `hir::Krate` value) (as
// distinct from the krate module). This is basically a hash of
// the entire krate, so if you read from `Krate` (e.g., by calling
......@@ -348,90 +374,101 @@ pub fn to_dep_node(self, tcx: TyCtxt, kind: DepKind) -> DepNode {
// suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain
// access to the krate, but you must remember to add suitable
// edges yourself for the individual items that you read.
Krate,
[] Krate,
// Represents the HIR node with the given node-id
Hir(DefId),
[] Hir(DefId),
// Represents the body of a function or method. The def-id is that of the
// function/method.
HirBody(DefId),
[] HirBody(DefId),
// Represents the metadata for a given HIR node, typically found
// in an extern crate.
MetaData(DefId),
[] MetaData(DefId),
// Represents some artifact that we save to disk. Note that these
// do not have a def-id as part of their identifier.
WorkProduct(WorkProductId),
[] WorkProduct(WorkProductId),
// Represents different phases in the compiler.
RegionMaps(DefId),
Coherence,
Resolve,
CoherenceCheckTrait(DefId),
PrivacyAccessLevels(CrateNum),
[] RegionMaps(DefId),
[] Coherence,
[] Resolve,
[] CoherenceCheckTrait(DefId),
[] PrivacyAccessLevels(CrateNum),
// Represents the MIR for a fn; also used as the task node for
// things read/modify that MIR.
Mir(DefId),
MirShim(DefIdList),
BorrowCheckKrate,
BorrowCheck(DefId),
RvalueCheck(DefId),
Reachability,
MirKeys,
TransWriteMetadata,
CrateVariances,
[] MirConstQualif(DefId),
[] MirConst(DefId),
[] MirValidated(DefId),
[] MirOptimized(DefId),
[] MirShim { instance_def: InstanceDef<'tcx> },
[] BorrowCheckKrate,
[] BorrowCheck(DefId),
[] RvalueCheck(DefId),
[] Reachability,
[] MirKeys,
[] TransWriteMetadata,
[] CrateVariances,
// Nodes representing bits of computed IR in the tcx. Each shared
// table in the tcx (or elsewhere) maps to one of these
// nodes. Often we map multiple tables to the same node if there
// is no point in distinguishing them (e.g., both the type and
// predicates for an item wind up in `ItemSignature`).
AssociatedItems(DefId),
ItemSignature(DefId),
ItemVarianceConstraints(DefId),
ItemVariances(DefId),
IsConstFn(DefId),
IsForeignItem(DefId),
TypeParamPredicates { item_id: DefId, param_id: DefId },
SizedConstraint(DefId),
DtorckConstraint(DefId),
AdtDestructor(DefId),
AssociatedItemDefIds(DefId),
InherentImpls(DefId),
TypeckBodiesKrate,
TypeckTables(DefId),
ConstEval(DefId),
SymbolName(DefId),
SpecializationGraph(DefId),
ObjectSafety(DefId),
IsCopy(DefId),
IsSized(DefId),
IsFreeze(DefId),
NeedsDrop(DefId),
Layout(DefId),
// The set of impls for a given trait. Ultimately, it would be
// nice to get more fine-grained here (e.g., to include a
// simplified type), but we can't do that until we restructure the
// HIR to distinguish the *header* of an impl from its body. This
// is because changes to the header may change the self-type of
// the impl and hence would require us to be more conservative
// than changes in the impl body.
TraitImpls(DefId),
AllLocalTraitImpls,
// nodes.
[] AssociatedItems(DefId),
[] TypeOfItem(DefId),
[] GenericsOfItem(DefId),
[] PredicatesOfItem(DefId),
[] SuperPredicatesOfItem(DefId),
[] TraitDefOfItem(DefId),
[] AdtDefOfItem(DefId),
[] IsDefaultImpl(DefId),
[] ImplTraitRef(DefId),
[] ImplPolarity(DefId),
[] ClosureKind(DefId),
[] FnSignature(DefId),
[] CoerceUnsizedInfo(DefId),
[] ItemVarianceConstraints(DefId),
[] ItemVariances(DefId),
[] IsConstFn(DefId),
[] IsForeignItem(DefId),
[] TypeParamPredicates { item_id: DefId, param_id: DefId },
[] SizedConstraint(DefId),
[] DtorckConstraint(DefId),
[] AdtDestructor(DefId),
[] AssociatedItemDefIds(DefId),
[] InherentImpls(DefId),
[] TypeckBodiesKrate,
[] TypeckTables(DefId),
[] HasTypeckTables(DefId),
[] ConstEval { def_id: DefId, substs: &'tcx Substs<'tcx> },
[] SymbolName(DefId),
[] InstanceSymbolName { instance: Instance<'tcx> },
[] SpecializationGraph(DefId),
[] ObjectSafety(DefId),
[anon] IsCopy(DefId),
[anon] IsSized(DefId),
[anon] IsFreeze(DefId),
[anon] NeedsDrop(DefId),
[anon] Layout(DefId),
// The set of impls for a given trait.
[] TraitImpls(DefId),
[] RelevantTraitImpls(DefId, SimplifiedType),
[] AllLocalTraitImpls,
// Nodes representing caches. To properly handle a true cache, we
// don't use a DepTrackingMap, but rather we push a task node.
// Otherwise the write into the map would be incorrectly
// attributed to the first task that happened to fill the cache,
// which would yield an overly conservative dep-graph.
TraitItems(DefId),
ReprHints(DefId),
[] TraitItems(DefId),
[] ReprHints(DefId),
// Trait selection cache is a little funny. Given a trait
// reference like `Foo: SomeTrait<Bar>`, there could be
......@@ -458,35 +495,45 @@ pub fn to_dep_node(self, tcx: TyCtxt, kind: DepKind) -> DepNode {
// imprecision in our dep-graph tracking. The important thing is
// that for any given trait-ref, we always map to the **same**
// trait-select node.
TraitSelect { trait_def_id: DefId, input_def_id: DefId },
[] TraitSelect { trait_def_id: DefId, input_def_id: DefId },
// For proj. cache, we just keep a list of all def-ids, since it is
// not a hotspot.
ProjectionCache { def_ids: DefIdList },
ParamEnv(DefId),
DescribeDef(DefId),
DefSpan(DefId),
Stability(DefId),
Deprecation(DefId),
ItemBodyNestedBodies(DefId),
ConstIsRvaluePromotableToStatic(DefId),
ImplParent(DefId),
TraitOfItem(DefId),
IsExportedSymbol(DefId),
IsMirAvailable(DefId),
ItemAttrs(DefId),
FnArgNames(DefId),
DylibDepFormats(DefId),
IsAllocator(DefId),
IsPanicRuntime(DefId),
ExternCrate(DefId),
[] ProjectionCache { def_ids: DefIdList },
[] ParamEnv(DefId),
[] DescribeDef(DefId),
[] DefSpan(DefId),
[] Stability(DefId),
[] Deprecation(DefId),
[] ItemBodyNestedBodies(DefId),
[] ConstIsRvaluePromotableToStatic(DefId),
[] ImplParent(DefId),
[] TraitOfItem(DefId),
[] IsExportedSymbol(DefId),
[] IsMirAvailable(DefId),
[] ItemAttrs(DefId),
[] FnArgNames(DefId),
[] DylibDepFormats(DefId),
[] IsAllocator(DefId),
[] IsPanicRuntime(DefId),
[] ExternCrate(DefId),
);
trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> {
trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> : fmt::Debug {
const CAN_RECONSTRUCT_QUERY_KEY: bool;
fn to_fingerprint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Fingerprint;
fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String;
/// This method turns the parameters of a DepNodeConstructor into an opaque
/// Fingerprint to be used in DepNode.
/// Not all DepNodeParams support being turned into a Fingerprint (they
/// don't need to if the corresponding DepNode is anonymous).
fn to_fingerprint(&self, _: TyCtxt<'a, 'gcx, 'tcx>) -> Fingerprint {
panic!("Not implemented. Accidentally called on anonymous node?")
}
fn to_debug_str(&self, _: TyCtxt<'a, 'gcx, 'tcx>) -> String {
format!("{:?}", self)
}
}
impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a, T> DepNodeParams<'a, 'gcx, 'tcx> for T
......
......@@ -8,25 +8,35 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ich::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use super::{DepGraphQuery, DepNode};
use rustc_data_structures::stable_hasher::StableHasher;
use std::env;
use std::hash::Hash;
use std::mem;
use super::{DepGraphQuery, DepKind, DepNode};
use super::debug::EdgeFilter;
pub struct DepGraphEdges {
nodes: Vec<DepNode>,
indices: FxHashMap<DepNode, IdIndex>,
edges: FxHashSet<(IdIndex, IdIndex)>,
open_nodes: Vec<OpenNode>,
indices: FxHashMap<DepNode, DepNodeIndex>,
edges: FxHashSet<(DepNodeIndex, DepNodeIndex)>,
task_stack: Vec<OpenTask>,
forbidden_edge: Option<EdgeFilter>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct IdIndex {
pub struct DepNodeIndex {
index: u32
}
impl IdIndex {
fn new(v: usize) -> IdIndex {
impl DepNodeIndex {
pub const INVALID: DepNodeIndex = DepNodeIndex { index: ::std::u32::MAX };
fn new(v: usize) -> DepNodeIndex {
assert!((v & 0xFFFF_FFFF) == v);
IdIndex { index: v as u32 }
DepNodeIndex { index: v as u32 }
}
fn index(self) -> usize {
......@@ -35,67 +45,136 @@ fn index(self) -> usize {
}
#[derive(Clone, Debug, PartialEq)]
enum OpenNode {
Node(IdIndex),
enum OpenTask {
Regular {
node: DepNode,
reads: Vec<DepNode>,
read_set: FxHashSet<DepNode>,
},
Anon {
reads: Vec<DepNode>,
read_set: FxHashSet<DepNode>,
},
Ignore,
}
impl DepGraphEdges {
pub fn new() -> DepGraphEdges {
let forbidden_edge = if cfg!(debug_assertions) {
match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
Ok(s) => {
match EdgeFilter::new(&s) {
Ok(f) => Some(f),
Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
}
}
Err(_) => None,
}
} else {
None
};
DepGraphEdges {
nodes: vec![],
indices: FxHashMap(),
edges: FxHashSet(),
open_nodes: Vec::new()
task_stack: Vec::new(),
forbidden_edge,
}
}
fn id(&self, index: IdIndex) -> DepNode {
self.nodes[index.index()].clone()
}
/// Creates a node for `id` in the graph.
fn make_node(&mut self, id: DepNode) -> IdIndex {
if let Some(&i) = self.indices.get(&id) {
return i;
}
let index = IdIndex::new(self.nodes.len());
self.nodes.push(id.clone());
self.indices.insert(id, index);
index
}
/// Top of the stack of open nodes.
fn current_node(&self) -> Option<OpenNode> {
self.open_nodes.last().cloned()
fn id(&self, index: DepNodeIndex) -> DepNode {
self.nodes[index.index()]
}
pub fn push_ignore(&mut self) {
self.open_nodes.push(OpenNode::Ignore);
self.task_stack.push(OpenTask::Ignore);
}
pub fn pop_ignore(&mut self) {
let popped_node = self.open_nodes.pop().unwrap();
assert_eq!(popped_node, OpenNode::Ignore);
let popped_node = self.task_stack.pop().unwrap();
debug_assert_eq!(popped_node, OpenTask::Ignore);
}
pub fn push_task(&mut self, key: DepNode) {
let top_node = self.current_node();
self.task_stack.push(OpenTask::Regular {
node: key,
reads: Vec::new(),
read_set: FxHashSet(),
});
}
let new_node = self.make_node(key);
self.open_nodes.push(OpenNode::Node(new_node));
pub fn pop_task(&mut self, key: DepNode) -> DepNodeIndex {
let popped_node = self.task_stack.pop().unwrap();
// if we are in the midst of doing task T, then this new task
// N is a subtask of T, so add an edge N -> T.
if let Some(top_node) = top_node {
self.add_edge_from_open_node(top_node, |t| (new_node, t));
if let OpenTask::Regular {
node,
read_set: _,
reads
} = popped_node {
debug_assert_eq!(node, key);
let target_id = self.get_or_create_node(node);
for read in reads.into_iter() {
let source_id = self.get_or_create_node(read);
self.edges.insert((source_id, target_id));
}
target_id
} else {
bug!("pop_task() - Expected regular task to be popped")
}
}
pub fn pop_task(&mut self, key: DepNode) {
let popped_node = self.open_nodes.pop().unwrap();
assert_eq!(OpenNode::Node(self.indices[&key]), popped_node);
pub fn push_anon_task(&mut self) {
self.task_stack.push(OpenTask::Anon {
reads: Vec::new(),
read_set: FxHashSet(),
});
}
pub fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndex {
let popped_node = self.task_stack.pop().unwrap();
if let OpenTask::Anon {
read_set: _,
reads
} = popped_node {
let mut fingerprint = Fingerprint::zero();
let mut hasher = StableHasher::new();
for read in reads.iter() {
mem::discriminant(&read.kind).hash(&mut hasher);
// Fingerprint::combine() is faster than sending Fingerprint
// through the StableHasher (at least as long as StableHasher
// is so slow).
fingerprint = fingerprint.combine(read.hash);
}
fingerprint = fingerprint.combine(hasher.finish());
let target_dep_node = DepNode {
kind,
hash: fingerprint,
};
if let Some(&index) = self.indices.get(&target_dep_node) {
return index;
}
let target_id = self.get_or_create_node(target_dep_node);
for read in reads.into_iter() {
let source_id = self.get_or_create_node(read);
self.edges.insert((source_id, target_id));
}
target_id
} else {
bug!("pop_anon_task() - Expected anonymous task to be popped")
}
}
/// Indicates that the current task `C` reads `v` by adding an
......@@ -103,58 +182,42 @@ pub fn pop_task(&mut self, key: DepNode) {
/// effect. Note that *reading* from tracked state is harmless if
/// you are not in a task; what is bad is *writing* to tracked
/// state (and leaking data that you read into a tracked task).
pub fn read(&mut self, v: DepNode) {
if self.current_node().is_some() {
let source = self.make_node(v);
self.add_edge_from_current_node(|current| (source, current))
}
}
/// Indicates that the current task `C` writes `v` by adding an
/// edge from `C` to `v`. If there is no current task, panics. If
/// you want to suppress this edge, use `ignore`.
pub fn write(&mut self, v: DepNode) {
let target = self.make_node(v);
self.add_edge_from_current_node(|current| (current, target))
}
/// Invoke `add_edge_from_open_node` with the top of the stack, or
/// panic if stack is empty.
fn add_edge_from_current_node<OP>(&mut self,
op: OP)
where OP: FnOnce(IdIndex) -> (IdIndex, IdIndex)
{
match self.current_node() {
Some(open_node) => self.add_edge_from_open_node(open_node, op),
None => bug!("no current node, cannot add edge into dependency graph")
pub fn read(&mut self, source: DepNode) {
match self.task_stack.last_mut() {
Some(&mut OpenTask::Regular {
node: target,
ref mut reads,
ref mut read_set,
}) => {
if read_set.insert(source) {
reads.push(source);
if cfg!(debug_assertions) {
if let Some(ref forbidden_edge) = self.forbidden_edge {
if forbidden_edge.test(&source, &target) {
bug!("forbidden edge {:?} -> {:?} created", source, target)
}
}
}
}
}
Some(&mut OpenTask::Anon {
ref mut reads,
ref mut read_set,
}) => {
if read_set.insert(source) {
reads.push(source);
}
}
Some(&mut OpenTask::Ignore) | None => {
// ignore
}
}
}
/// Adds an edge to or from the `open_node`, assuming `open_node`
/// is not `Ignore`. The direction of the edge is determined by
/// the closure `op` --- we pass as argument the open node `n`,
/// and the closure returns a (source, target) tuple, which should
/// include `n` in one spot or another.
fn add_edge_from_open_node<OP>(&mut self,
open_node: OpenNode,
op: OP)
where OP: FnOnce(IdIndex) -> (IdIndex, IdIndex)
{
let (source, target) = match open_node {
OpenNode::Node(n) => op(n),
OpenNode::Ignore => { return; }
};
// ignore trivial self edges, which are not very interesting
if source == target {
return;
}
if self.edges.insert((source, target)) {
debug!("adding edge from {:?} to {:?}",
self.id(source),
self.id(target));
}
pub fn read_index(&mut self, source: DepNodeIndex) {
let dep_node = self.nodes[source.index()];
self.read(dep_node);
}
pub fn query(&self) -> DepGraphQuery {
......@@ -163,4 +226,30 @@ pub fn query(&self) -> DepGraphQuery {
.collect();
DepGraphQuery::new(&self.nodes, &edges)
}
#[inline]
pub fn add_edge(&mut self, source: DepNode, target: DepNode) {
let source = self.get_or_create_node(source);
let target = self.get_or_create_node(target);
self.edges.insert((source, target));
}
pub fn add_node(&mut self, node: DepNode) {
self.get_or_create_node(node);
}
#[inline]
fn get_or_create_node(&mut self, dep_node: DepNode) -> DepNodeIndex {
let DepGraphEdges {
ref mut indices,
ref mut nodes,
..
} = *self;
*indices.entry(dep_node).or_insert_with(|| {
let next_id = nodes.len();
nodes.push(dep_node);
DepNodeIndex::new(next_id)
})
}
}
......@@ -13,21 +13,20 @@
use std::cell::{Ref, RefCell};
use std::rc::Rc;
use super::dep_node::{DepNode, WorkProductId};
use super::dep_node::{DepNode, DepKind, WorkProductId};
use super::query::DepGraphQuery;
use super::raii;
use super::safe::DepGraphSafe;
use super::thread::{DepGraphThreadData, DepMessage};
use super::edges::{DepGraphEdges, DepNodeIndex};
#[derive(Clone)]
pub struct DepGraph {
data: Rc<DepGraphData>
data: Option<Rc<DepGraphData>>
}
struct DepGraphData {
/// We send messages to the thread to let it build up the dep-graph
/// from the current run.
thread: DepGraphThreadData,
/// The actual graph data.
edges: RefCell<DepGraphEdges>,
/// When we load, there may be `.o` files, cached mir, or other such
/// things available to us. If we find that they are not dirty, we
......@@ -44,31 +43,35 @@ struct DepGraphData {
impl DepGraph {
pub fn new(enabled: bool) -> DepGraph {
DepGraph {
data: Rc::new(DepGraphData {
thread: DepGraphThreadData::new(enabled),
previous_work_products: RefCell::new(FxHashMap()),
work_products: RefCell::new(FxHashMap()),
dep_node_debug: RefCell::new(FxHashMap()),
})
data: if enabled {
Some(Rc::new(DepGraphData {
previous_work_products: RefCell::new(FxHashMap()),
work_products: RefCell::new(FxHashMap()),
edges: RefCell::new(DepGraphEdges::new()),
dep_node_debug: RefCell::new(FxHashMap()),
}))
} else {
None
}
}
}
/// True if we are actually building the full dep-graph.
#[inline]
pub fn is_fully_enabled(&self) -> bool {
self.data.thread.is_fully_enabled()
self.data.is_some()
}
pub fn query(&self) -> DepGraphQuery {
self.data.thread.query()
self.data.as_ref().unwrap().edges.borrow().query()
}
pub fn in_ignore<'graph>(&'graph self) -> Option<raii::IgnoreTask<'graph>> {
raii::IgnoreTask::new(&self.data.thread)
self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.edges))
}
pub fn in_task<'graph>(&'graph self, key: DepNode) -> Option<raii::DepTask<'graph>> {
raii::DepTask::new(&self.data.thread, key)
self.data.as_ref().map(|data| raii::DepTask::new(&data.edges, key))
}
pub fn with_ignore<OP,R>(&self, op: OP) -> R
......@@ -105,26 +108,75 @@ pub fn with_ignore<OP,R>(&self, op: OP) -> R
/// `arg` parameter.
///
/// [README]: README.md
pub fn with_task<C, A, R>(&self, key: DepNode, cx: C, arg: A, task: fn(C, A) -> R) -> R
where C: DepGraphSafe, A: DepGraphSafe
pub fn with_task<C, A, R>(&self,
key: DepNode,
cx: C,
arg: A,
task: fn(C, A) -> R)
-> (R, DepNodeIndex)
where C: DepGraphSafe
{
let _task = self.in_task(key);
task(cx, arg)
if let Some(ref data) = self.data {
data.edges.borrow_mut().push_task(key);
let result = task(cx, arg);
let dep_node_index = data.edges.borrow_mut().pop_task(key);
(result, dep_node_index)
} else {
(task(cx, arg), DepNodeIndex::INVALID)
}
}
/// Execute something within an "anonymous" task, that is, a task the
/// DepNode of which is determined by the list of inputs it read from.
pub fn with_anon_task<OP,R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex)
where OP: FnOnce() -> R
{
if let Some(ref data) = self.data {
data.edges.borrow_mut().push_anon_task();
let result = op();
let dep_node = data.edges.borrow_mut().pop_anon_task(dep_kind);
(result, dep_node)
} else {
(op(), DepNodeIndex::INVALID)
}
}
#[inline]
pub fn read(&self, v: DepNode) {
if self.data.thread.is_enqueue_enabled() {
self.data.thread.enqueue(DepMessage::Read(v));
if let Some(ref data) = self.data {
data.edges.borrow_mut().read(v);
}
}
#[inline]
pub fn read_index(&self, v: DepNodeIndex) {
if let Some(ref data) = self.data {
data.edges.borrow_mut().read_index(v);
}
}
/// Only to be used during graph loading
#[inline]
pub fn add_edge_directly(&self, source: DepNode, target: DepNode) {
self.data.as_ref().unwrap().edges.borrow_mut().add_edge(source, target);
}
/// Only to be used during graph loading
pub fn add_node_directly(&self, node: DepNode) {
self.data.as_ref().unwrap().edges.borrow_mut().add_node(node);
}
/// Indicates that a previous work product exists for `v`. This is
/// invoked during initial start-up based on what nodes are clean
/// (and what files exist in the incr. directory).
pub fn insert_previous_work_product(&self, v: &WorkProductId, data: WorkProduct) {
debug!("insert_previous_work_product({:?}, {:?})", v, data);
self.data.previous_work_products.borrow_mut()
.insert(v.clone(), data);
self.data
.as_ref()
.unwrap()
.previous_work_products
.borrow_mut()
.insert(v.clone(), data);
}
/// Indicates that we created the given work-product in this run
......@@ -132,44 +184,50 @@ pub fn insert_previous_work_product(&self, v: &WorkProductId, data: WorkProduct)
/// run.
pub fn insert_work_product(&self, v: &WorkProductId, data: WorkProduct) {
debug!("insert_work_product({:?}, {:?})", v, data);
self.data.work_products.borrow_mut()
.insert(v.clone(), data);
self.data
.as_ref()
.unwrap()
.work_products
.borrow_mut()
.insert(v.clone(), data);
}
/// Check whether a previous work product exists for `v` and, if
/// so, return the path that leads to it. Used to skip doing work.
pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
self.data.previous_work_products.borrow()
.get(v)
.cloned()
self.data
.as_ref()
.and_then(|data| {
data.previous_work_products.borrow().get(v).cloned()
})
}
/// Access the map of work-products created during this run. Only
/// used during saving of the dep-graph.
pub fn work_products(&self) -> Ref<FxHashMap<WorkProductId, WorkProduct>> {
self.data.work_products.borrow()
self.data.as_ref().unwrap().work_products.borrow()
}
/// Access the map of work-products created during the cached run. Only
/// used during saving of the dep-graph.
pub fn previous_work_products(&self) -> Ref<FxHashMap<WorkProductId, WorkProduct>> {
self.data.previous_work_products.borrow()
self.data.as_ref().unwrap().previous_work_products.borrow()
}
#[inline(always)]
pub(super) fn register_dep_node_debug_str<F>(&self,
dep_node: DepNode,
debug_str_gen: F)
pub fn register_dep_node_debug_str<F>(&self,
dep_node: DepNode,
debug_str_gen: F)
where F: FnOnce() -> String
{
let mut dep_node_debug = self.data.dep_node_debug.borrow_mut();
let mut dep_node_debug = self.data.as_ref().unwrap().dep_node_debug.borrow_mut();
dep_node_debug.entry(dep_node)
.or_insert_with(debug_str_gen);
}
pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
self.data.dep_node_debug.borrow().get(&dep_node).cloned()
self.data.as_ref().and_then(|t| t.dep_node_debug.borrow().get(&dep_node).cloned())
}
}
......@@ -206,6 +264,7 @@ pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct WorkProduct {
pub cgu_name: String,
/// Extra hash used to decide if work-product is still suitable;
/// note that this is *not* a hash of the work-product itself.
/// See documentation on `WorkProduct` type for an example.
......
......@@ -16,14 +16,13 @@
mod query;
mod raii;
mod safe;
mod shadow;
mod thread;
pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig};
pub use self::dep_node::DepNode;
pub use self::dep_node::WorkProductId;
pub use self::graph::DepGraph;
pub use self::graph::WorkProduct;
pub use self::edges::DepNodeIndex;
pub use self::query::DepGraphQuery;
pub use self::safe::AssertDepGraphSafe;
pub use self::safe::DepGraphSafe;
......
......@@ -9,53 +9,49 @@
// except according to those terms.
use super::DepNode;
use super::thread::{DepGraphThreadData, DepMessage};
use super::edges::DepGraphEdges;
use std::cell::RefCell;
pub struct DepTask<'graph> {
data: &'graph DepGraphThreadData,
key: Option<DepNode>,
graph: &'graph RefCell<DepGraphEdges>,
key: DepNode,
}
impl<'graph> DepTask<'graph> {
pub fn new(data: &'graph DepGraphThreadData, key: DepNode)
-> Option<DepTask<'graph>> {
if data.is_enqueue_enabled() {
data.enqueue(DepMessage::PushTask(key.clone()));
Some(DepTask { data: data, key: Some(key) })
} else {
None
pub fn new(graph: &'graph RefCell<DepGraphEdges>,
key: DepNode)
-> DepTask<'graph> {
graph.borrow_mut().push_task(key);
DepTask {
graph,
key,
}
}
}
impl<'graph> Drop for DepTask<'graph> {
fn drop(&mut self) {
if self.data.is_enqueue_enabled() {
self.data.enqueue(DepMessage::PopTask(self.key.take().unwrap()));
}
self.graph.borrow_mut().pop_task(self.key);
}
}
pub struct IgnoreTask<'graph> {
data: &'graph DepGraphThreadData
graph: &'graph RefCell<DepGraphEdges>,
}
impl<'graph> IgnoreTask<'graph> {
pub fn new(data: &'graph DepGraphThreadData) -> Option<IgnoreTask<'graph>> {
if data.is_enqueue_enabled() {
data.enqueue(DepMessage::PushIgnore);
Some(IgnoreTask { data: data })
} else {
None
pub fn new(graph: &'graph RefCell<DepGraphEdges>) -> IgnoreTask<'graph> {
graph.borrow_mut().push_ignore();
IgnoreTask {
graph
}
}
}
impl<'graph> Drop for IgnoreTask<'graph> {
fn drop(&mut self) {
if self.data.is_enqueue_enabled() {
self.data.enqueue(DepMessage::PopIgnore);
}
self.graph.borrow_mut().pop_ignore();
}
}
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The "Shadow Graph" is maintained on the main thread and which
//! tracks each message relating to the dep-graph and applies some
//! sanity checks as they go by. If an error results, it means you get
//! a nice stack-trace telling you precisely what caused the error.
//!
//! NOTE: This is a debugging facility which can potentially have non-trivial
//! runtime impact. Therefore, it is largely compiled out if
//! debug-assertions are not enabled.
//!
//! The basic sanity check, enabled if you have debug assertions
//! enabled, is that there is always a task (or ignore) on the stack
//! when you do read/write, and that the tasks are pushed/popped
//! according to a proper stack discipline.
//!
//! Optionally, if you specify RUST_FORBID_DEP_GRAPH_EDGE, you can
//! specify an edge filter to be applied to each edge as it is
//! created. See `./README.md` for details.
use std::cell::RefCell;
use std::env;
use super::DepNode;
use super::thread::DepMessage;
use super::debug::EdgeFilter;
/// A debugging mirror of the dep-graph task stack, maintained on the main
/// thread. Each `DepMessage` is fed to it in real time (see `enqueue`) so
/// that stack-discipline violations and forbidden edges are reported with a
/// precise backtrace, instead of surfacing later on the dep-graph thread.
pub struct ShadowGraph {
    // if you push None onto the stack, that corresponds to an Ignore
    stack: RefCell<Vec<Option<DepNode>>>,
    // Edge filter parsed from RUST_FORBID_DEP_GRAPH_EDGE (only when debug
    // assertions are enabled); `None` means no filtering.
    forbidden_edge: Option<EdgeFilter>,
}
const ENABLED: bool = cfg!(debug_assertions);
impl ShadowGraph {
    /// Creates a shadow graph with an empty task stack.
    ///
    /// When checking is compiled in (`ENABLED`), this also parses the
    /// `RUST_FORBID_DEP_GRAPH_EDGE` environment variable into an
    /// `EdgeFilter`; an invalid filter string is a hard error (`bug!`).
    pub fn new() -> Self {
        let forbidden_edge = if !ENABLED {
            None
        } else {
            match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
                Ok(s) => {
                    match EdgeFilter::new(&s) {
                        Ok(f) => Some(f),
                        Err(err) => bug!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
                    }
                }
                // Variable not set (or not valid unicode): no filter.
                Err(_) => None,
            }
        };

        ShadowGraph {
            stack: RefCell::new(vec![]),
            forbidden_edge,
        }
    }

    /// Returns true when shadow-graph checking is compiled in
    /// (i.e. when debug assertions are enabled).
    #[inline]
    pub fn enabled(&self) -> bool {
        ENABLED
    }

    /// Applies `message` to the shadow stack, enforcing the push/pop
    /// stack discipline and, for writes, the forbidden-edge filter.
    /// A no-op unless `ENABLED`.
    pub fn enqueue(&self, message: &DepMessage) {
        if ENABLED {
            // Re-entrancy guard: checking an edge below can format DepNodes,
            // which may itself generate reads that re-enter this method while
            // `stack` is still mutably borrowed.
            if self.stack.try_borrow().is_err() {
                // When we apply edge filters, that invokes the Debug trait on
                // DefIds, which in turn reads from various bits of state and
                // creates reads! Ignore those recursive reads.
                return;
            }

            let mut stack = self.stack.borrow_mut();
            match *message {
                // It is ok to READ shared state outside of a
                // task. That can't do any harm (at least, the only
                // way it can do harm is by leaking that data into a
                // query or task, which would be a problem
                // anyway). What would be bad is WRITING to that
                // state.
                DepMessage::Read(_) => { }
                // A write must occur inside some task (or Ignore frame);
                // also run it through the forbidden-edge filter.
                DepMessage::Write(ref n) => self.check_edge(top(&stack), Some(Some(n))),
                DepMessage::PushTask(ref n) => stack.push(Some(n.clone())),
                DepMessage::PushIgnore => stack.push(None),
                // Popping a task must match the task that is on top of the
                // stack; anything else is a stack-discipline violation.
                DepMessage::PopTask(ref n) => {
                    match stack.pop() {
                        Some(Some(m)) => {
                            if *n != m {
                                bug!("stack mismatch: found {:?} expected {:?}", m, n)
                            }
                        }
                        Some(None) => bug!("stack mismatch: found Ignore expected {:?}", n),
                        None => bug!("stack mismatch: found empty stack, expected {:?}", n),
                    }
                }
                // Likewise, popping an ignore frame must find an ignore
                // frame (`None`) on top.
                DepMessage::PopIgnore => {
                    match stack.pop() {
                        Some(Some(m)) => bug!("stack mismatch: found {:?} expected ignore", m),
                        Some(None) => (),
                        None => bug!("stack mismatch: found empty stack, expected ignore"),
                    }
                }
                DepMessage::Query => (),
            }
        }
    }

    /// Checks a would-be edge against the forbidden-edge filter.
    ///
    /// Each side is encoded as: `None` = nothing on the stack,
    /// `Some(None)` = an Ignore frame, `Some(Some(node))` = a real task.
    /// One side is always `Some(Some(_))` (the node being written or the
    /// task on top of the stack at the call site in `enqueue`).
    fn check_edge(&self,
                  source: Option<Option<&DepNode>>,
                  target: Option<Option<&DepNode>>) {
        assert!(ENABLED);
        match (source, target) {
            // cannot happen, one side is always Some(Some(_))
            (None, None) => unreachable!(),

            // nothing on top of the stack
            (None, Some(n)) | (Some(n), None) => bug!("write of {:?} but no current task", n),

            // this corresponds to an Ignore being top of the stack
            (Some(None), _) | (_, Some(None)) => (),

            // a task is on top of the stack
            (Some(Some(source)), Some(Some(target))) => {
                if let Some(ref forbidden_edge) = self.forbidden_edge {
                    if forbidden_edge.test(source, target) {
                        bug!("forbidden edge {:?} -> {:?} created", source, target)
                    }
                }
            }
        }
    }
}
// Peek at the slot on top of the stack, converting the borrowed
// `&Option<DepNode>` it holds into an `Option<&DepNode>` so callers deal
// with an optional reference rather than a reference to an option.
fn top<'s>(stack: &'s Vec<Option<DepNode>>) -> Option<Option<&'s DepNode>> {
    stack.last().map(Option::as_ref)
}
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Manages the communication between the compiler's main thread and
//! the thread that constructs the dependency graph. The basic idea is
//! to use double buffering to lower the cost of producing a message.
//! In the compiler thread, we accumulate messages in a vector until
//! the vector is full, or until we want to query the graph, and then
//! we send that vector over to the depgraph thread. At the same time,
//! we receive an empty vector from the depgraph thread that we can use
//! to accumulate more messages. This way we only ever have two vectors
//! allocated (and both have a fairly large capacity).
use rustc_data_structures::veccell::VecCell;
use std::sync::mpsc::{self, Sender, Receiver};
use std::thread;
use super::DepGraphQuery;
use super::DepNode;
use super::edges::DepGraphEdges;
use super::shadow::ShadowGraph;
/// A single dep-graph operation, forwarded from the compiler's main
/// thread to the dep-graph thread (and eagerly to the shadow graph).
#[derive(Debug)]
pub enum DepMessage {
    /// The current task read the given node.
    Read(DepNode),
    /// The current task wrote the given node.
    Write(DepNode),
    /// Begin a new task; subsequent reads/writes belong to it.
    PushTask(DepNode),
    /// End a task; must match the most recent `PushTask` node.
    PopTask(DepNode),
    /// Begin a region in which reads/writes are ignored.
    PushIgnore,
    /// End the most recent `PushIgnore` region.
    PopIgnore,
    /// Request a `DepGraphQuery` snapshot of the graph built so far.
    Query,
}
/// Compiler-thread side of dep-graph construction: accumulates
/// `DepMessage`s in a buffer and exchanges buffers with the dep-graph
/// thread (double buffering; see the module docs).
pub struct DepGraphThreadData {
    // If false, messages are only checked by the shadow graph (when
    // that is enabled) and never sent to the dep-graph thread.
    enabled: bool,

    // The "shadow graph" is a debugging aid. We give it each message
    // in real time as it arrives and it checks for various errors
    // (for example, a read/write when there is no current task; it
    // can also apply user-defined filters; see `shadow` module for
    // details). This only occurs if debug-assertions are enabled.
    //
    // Note that in some cases the same errors will occur when the
    // data is processed off the main thread, but that's annoying
    // because it lacks precision about the source of the error.
    shadow_graph: ShadowGraph,

    // current buffer, where we accumulate messages
    messages: VecCell<DepMessage>,

    // where to receive new buffer when full
    swap_in: Receiver<Vec<DepMessage>>,

    // where to send buffer when full
    swap_out: Sender<Vec<DepMessage>>,

    // where to receive query results
    query_in: Receiver<DepGraphQuery>,
}
const INITIAL_CAPACITY: usize = 2048;
impl DepGraphThreadData {
    /// Creates the channel plumbing and, if `enabled`, spawns the
    /// dep-graph thread running `main` on the other end of it.
    pub fn new(enabled: bool) -> DepGraphThreadData {
        // tx1/rx1: full buffers, compiler thread -> dep-graph thread.
        let (tx1, rx1) = mpsc::channel();
        // tx2/rx2: empty buffers, dep-graph thread -> compiler thread.
        let (tx2, rx2) = mpsc::channel();
        // txq/rxq: query results, dep-graph thread -> compiler thread.
        let (txq, rxq) = mpsc::channel();

        if enabled {
            thread::spawn(move || main(rx1, tx2, txq));
        }

        DepGraphThreadData {
            enabled,
            shadow_graph: ShadowGraph::new(),
            messages: VecCell::with_capacity(INITIAL_CAPACITY),
            swap_in: rx2,
            swap_out: tx1,
            query_in: rxq,
        }
    }

    /// True if we are actually building the full dep-graph.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.enabled
    }

    /// True if (a) we are actually building the full dep-graph, or (b) we are
    /// only enqueuing messages in order to sanity-check them (which happens
    /// when debug assertions are enabled).
    #[inline]
    pub fn is_enqueue_enabled(&self) -> bool {
        self.is_fully_enabled() || self.shadow_graph.enabled()
    }

    /// Sends the current batch of messages to the thread. Installs a
    /// new vector of messages.
    fn swap(&self) {
        assert!(self.is_fully_enabled(), "should never swap if not fully enabled");

        // should be a buffer waiting for us (though of course we may
        // have to wait for depgraph thread to finish processing the
        // old messages)
        let new_messages = self.swap_in.recv().unwrap();
        assert!(new_messages.is_empty());

        // swap in the empty buffer and extract the full one
        let old_messages = self.messages.swap(new_messages);

        // send full buffer to depgraph thread to be processed
        self.swap_out.send(old_messages).unwrap();
    }

    /// Flushes any pending messages to the dep-graph thread and blocks
    /// until it answers with a snapshot of the graph built so far.
    pub fn query(&self) -> DepGraphQuery {
        assert!(self.is_fully_enabled(), "should never query if not fully enabled");
        self.enqueue(DepMessage::Query);
        self.swap();
        self.query_in.recv().unwrap()
    }

    /// Enqueue a message to be sent when things are next swapped. (If
    /// the buffer is full, this may swap.)
    #[inline]
    pub fn enqueue(&self, message: DepMessage) {
        assert!(self.is_enqueue_enabled(), "should never enqueue if not enqueue-enabled");
        // The shadow graph sees every message eagerly, so that errors
        // are reported precisely at the point they occur.
        self.shadow_graph.enqueue(&message);
        if self.is_fully_enabled() {
            self.enqueue_enabled(message);
        }
    }

    // Outline this fn since I expect it may want to be inlined
    // separately.
    fn enqueue_enabled(&self, message: DepMessage) {
        let len = self.messages.push(message);
        if len == INITIAL_CAPACITY {
            self.swap();
        }
    }
}
/// Body of the dep-graph thread: drains each buffer of messages sent
/// by the compiler thread, applies them to the edge set, and hands the
/// emptied buffer back for reuse (double buffering; see module docs).
pub fn main(swap_in: Receiver<Vec<DepMessage>>,
            swap_out: Sender<Vec<DepMessage>>,
            query_out: Sender<DepGraphQuery>) {
    let mut edges = DepGraphEdges::new();

    // The compiler thread always expects a fresh buffer to already be
    // in flight, so prime the channel with one before looping.
    swap_out.send(Vec::with_capacity(INITIAL_CAPACITY)).unwrap();

    // Keep processing buffers until the compiler thread hangs up.
    while let Ok(mut messages) = swap_in.recv() {
        for msg in messages.drain(..) {
            match msg {
                DepMessage::Read(node) => edges.read(node),
                DepMessage::Write(node) => edges.write(node),
                DepMessage::PushTask(node) => edges.push_task(node),
                DepMessage::PopTask(node) => edges.pop_task(node),
                DepMessage::PushIgnore => edges.push_ignore(),
                DepMessage::PopIgnore => edges.pop_ignore(),
                DepMessage::Query => query_out.send(edges.query()).unwrap(),
            }
        }
        // Return the (now empty) buffer; if the receiver was dropped,
        // the compiler thread is gone and we are done.
        if swap_out.send(messages).is_err() {
            break;
        }
    }
}
......@@ -661,3 +661,60 @@ fn hash_stable<W: StableHasherResult>(&self,
})
}
}
// Stable hashing for `ty::fast_reject::SimplifiedType`: each variant's
// payload (primitive kind, def-id, or arity) is hashed along with the
// variant itself.
impl_stable_hash_for!(enum ty::fast_reject::SimplifiedType {
    BoolSimplifiedType,
    CharSimplifiedType,
    IntSimplifiedType(int_ty),
    UintSimplifiedType(int_ty),
    FloatSimplifiedType(float_ty),
    AdtSimplifiedType(def_id),
    StrSimplifiedType,
    ArraySimplifiedType,
    PtrSimplifiedType,
    NeverSimplifiedType,
    TupleSimplifiedType(size),
    TraitSimplifiedType(def_id),
    ClosureSimplifiedType(def_id),
    AnonSimplifiedType(def_id),
    FunctionSimplifiedType(params),
    ParameterSimplifiedType
});
// An `Instance` hashes stably as the combination of its `def` and its
// `substs`.
impl_stable_hash_for!(struct ty::Instance<'tcx> {
    def,
    substs
});
impl<'a, 'gcx, 'tcx> HashStable<StableHashingContext<'a, 'gcx, 'tcx>> for ty::InstanceDef<'tcx> {
    /// Feeds the variant discriminant and then every field of the
    /// active variant into the stable hasher, so instances of different
    /// kinds (or with different payloads) hash differently.
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a, 'gcx, 'tcx>,
                                          hasher: &mut StableHasher<W>) {
        // Hash the discriminant first: variants with identical payloads
        // (e.g. `Item` vs `Intrinsic`) must still hash differently.
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            ty::InstanceDef::Item(def_id) => {
                def_id.hash_stable(hcx, hasher);
            }
            ty::InstanceDef::Intrinsic(def_id) => {
                def_id.hash_stable(hcx, hasher);
            }
            ty::InstanceDef::FnPtrShim(def_id, ty) => {
                def_id.hash_stable(hcx, hasher);
                ty.hash_stable(hcx, hasher);
            }
            ty::InstanceDef::Virtual(def_id, n) => {
                def_id.hash_stable(hcx, hasher);
                n.hash_stable(hcx, hasher);
            }
            ty::InstanceDef::ClosureOnceShim { call_once } => {
                call_once.hash_stable(hcx, hasher);
            }
            ty::InstanceDef::DropGlue(def_id, t) => {
                def_id.hash_stable(hcx, hasher);
                t.hash_stable(hcx, hasher);
            }
        }
    }
}
......@@ -489,6 +489,12 @@ pub fn build_dep_graph(&self) -> bool {
self.debugging_opts.query_dep_graph
}
/// Whether to record human-readable debug strings for dep-nodes.
/// Only true in debug-assertion builds, and then only when the user
/// asked for dep-graph queries (`-Z query-dep-graph`) or incremental
/// info output (`-Z incremental-info`).
#[inline(always)]
pub fn enable_dep_node_debug_strs(&self) -> bool {
    cfg!(debug_assertions) &&
    (self.debugging_opts.query_dep_graph || self.debugging_opts.incremental_info)
}
pub fn single_codegen_unit(&self) -> bool {
self.incremental.is_none() ||
self.cg.codegen_units == 1
......
......@@ -8,7 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use dep_graph::DepConstructor;
use hir::def_id::DefId;
use ty::{self, Ty, TypeFoldable, Substs};
use util::ppaux;
......@@ -59,27 +58,6 @@ pub fn def_ty<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
pub fn attrs<'a>(&self, tcx: ty::TyCtxt<'a, 'tcx, 'tcx>) -> ty::Attributes<'tcx> {
tcx.get_attrs(self.def_id())
}
pub //(crate)
fn dep_node(&self) -> DepConstructor {
// HACK: def-id binning, project-style; someone replace this with
// real on-demand.
let ty = match self {
&InstanceDef::FnPtrShim(_, ty) => Some(ty),
&InstanceDef::DropGlue(_, ty) => ty,
_ => None
}.into_iter();
DepConstructor::MirShim(
Some(self.def_id()).into_iter().chain(
ty.flat_map(|t| t.walk()).flat_map(|t| match t.sty {
ty::TyAdt(adt_def, _) => Some(adt_def.did),
ty::TyProjection(ref proj) => Some(proj.trait_ref.def_id),
_ => None,
})
).collect()
)
}
}
impl<'tcx> fmt::Display for Instance<'tcx> {
......
......@@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use dep_graph::{DepConstructor, DepNode, DepTrackingMapConfig};
use dep_graph::{DepConstructor, DepNode, DepNodeIndex};
use hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefId, LOCAL_CRATE};
use hir::def::Def;
use hir;
......@@ -186,7 +186,7 @@ fn from_cycle_error<'a>(_: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
struct QueryMap<D: QueryDescription> {
phantom: PhantomData<D>,
map: FxHashMap<D::Key, D::Value>,
map: FxHashMap<D::Key, (D::Value, DepNodeIndex)>,
}
impl<M: QueryDescription> QueryMap<M> {
......@@ -261,11 +261,16 @@ fn cycle_check<F, R>(self, span: Span, query: Query<'gcx>, compute: F)
}
}
trait QueryDescription: DepTrackingMapConfig {
pub trait QueryConfig {
type Key: Eq + Hash + Clone;
type Value;
}
trait QueryDescription: QueryConfig {
fn describe(tcx: TyCtxt, key: Self::Key) -> String;
}
impl<M: DepTrackingMapConfig<Key=DefId>> QueryDescription for M {
impl<M: QueryConfig<Key=DefId>> QueryDescription for M {
default fn describe(tcx: TyCtxt, def_id: DefId) -> String {
format!("processing `{}`", tcx.item_path_str(def_id))
}
......@@ -550,18 +555,19 @@ pub struct $name<$tcx> {
})*
}
$(impl<$tcx> DepTrackingMapConfig for queries::$name<$tcx> {
$(impl<$tcx> QueryConfig for queries::$name<$tcx> {
type Key = $K;
type Value = $V;
}
impl<'a, $tcx, 'lcx> queries::$name<$tcx> {
#[allow(unused)]
fn to_dep_node(tcx: TyCtxt, key: &$K) -> DepNode {
fn to_dep_node(tcx: TyCtxt<'a, $tcx, 'lcx>, key: &$K) -> DepNode {
use dep_graph::DepConstructor::*;
DepNode::new(tcx, $node(*key))
}
}
impl<'a, $tcx, 'lcx> queries::$name<$tcx> {
fn try_get_with<F, R>(tcx: TyCtxt<'a, $tcx, 'lcx>,
mut span: Span,
key: $K,
......@@ -574,7 +580,8 @@ fn try_get_with<F, R>(tcx: TyCtxt<'a, $tcx, 'lcx>,
key,
span);
if let Some(result) = tcx.maps.$name.borrow().map.get(&key) {
if let Some(&(ref result, dep_node_index)) = tcx.maps.$name.borrow().map.get(&key) {
tcx.dep_graph.read_index(dep_node_index);
return Ok(f(result));
}
......@@ -585,26 +592,46 @@ fn try_get_with<F, R>(tcx: TyCtxt<'a, $tcx, 'lcx>,
span = key.default_span(tcx)
}
let _task = tcx.dep_graph.in_task(Self::to_dep_node(tcx, &key));
let result = tcx.cycle_check(span, Query::$name(key), || {
let provider = tcx.maps.providers[key.map_crate()].$name;
provider(tcx.global_tcx(), key)
let (result, dep_node_index) = tcx.cycle_check(span, Query::$name(key), || {
let dep_node = Self::to_dep_node(tcx, &key);
if dep_node.kind.is_anon() {
tcx.dep_graph.with_anon_task(dep_node.kind, || {
let provider = tcx.maps.providers[key.map_crate()].$name;
provider(tcx.global_tcx(), key)
})
} else {
fn run_provider<'a, 'tcx, 'lcx>(tcx: TyCtxt<'a, 'tcx, 'lcx>,
key: $K)
-> $V {
let provider = tcx.maps.providers[key.map_crate()].$name;
provider(tcx.global_tcx(), key)
}
tcx.dep_graph.with_task(dep_node, tcx, key, run_provider)
}
})?;
Ok(f(tcx.maps.$name.borrow_mut().map.entry(key).or_insert(result)))
tcx.dep_graph.read_index(dep_node_index);
Ok(f(&tcx.maps
.$name
.borrow_mut()
.map
.entry(key)
.or_insert((result, dep_node_index))
.0))
}
pub fn try_get(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K)
-> Result<$V, CycleError<'a, $tcx>> {
// We register the `read` here, but not in `force`, since
// `force` does not give access to the value produced (and thus
// we actually don't read it).
tcx.dep_graph.read(Self::to_dep_node(tcx, &key));
Self::try_get_with(tcx, span, key, Clone::clone)
}
pub fn force(tcx: TyCtxt<'a, $tcx, 'lcx>, span: Span, key: $K) {
// Ignore dependencies, since we not reading the computed value
let _task = tcx.dep_graph.in_ignore();
match Self::try_get_with(tcx, span, key, |_| ()) {
Ok(()) => {}
Err(e) => tcx.report_cycle(e)
......@@ -796,12 +823,12 @@ fn default() -> Self {
// the driver creates (using several `rustc_*` crates).
define_maps! { <'tcx>
/// Records the type of every item.
[] type_of: ItemSignature(DefId) -> Ty<'tcx>,
[] type_of: TypeOfItem(DefId) -> Ty<'tcx>,
/// Maps from the def-id of an item (trait/struct/enum/fn) to its
/// associated generics and predicates.
[] generics_of: ItemSignature(DefId) -> &'tcx ty::Generics,
[] predicates_of: ItemSignature(DefId) -> ty::GenericPredicates<'tcx>,
[] generics_of: GenericsOfItem(DefId) -> &'tcx ty::Generics,
[] predicates_of: PredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>,
/// Maps from the def-id of a trait to the list of
/// super-predicates. This is a subset of the full list of
......@@ -809,15 +836,15 @@ fn default() -> Self {
/// evaluate them even during type conversion, often before the
/// full predicates are available (note that supertraits have
/// additional acyclicity requirements).
[] super_predicates_of: ItemSignature(DefId) -> ty::GenericPredicates<'tcx>,
[] super_predicates_of: SuperPredicatesOfItem(DefId) -> ty::GenericPredicates<'tcx>,
/// To avoid cycles within the predicates of a single item we compute
/// per-type-parameter predicates for resolving `T::AssocTy`.
[] type_param_predicates: type_param_predicates((DefId, DefId))
-> ty::GenericPredicates<'tcx>,
[] trait_def: ItemSignature(DefId) -> &'tcx ty::TraitDef,
[] adt_def: ItemSignature(DefId) -> &'tcx ty::AdtDef,
[] trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef,
[] adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef,
[] adt_destructor: AdtDestructor(DefId) -> Option<ty::Destructor>,
[] adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>],
[] adt_dtorck_constraint: DtorckConstraint(DefId) -> ty::DtorckConstraint<'tcx>,
......@@ -829,7 +856,7 @@ fn default() -> Self {
[] is_foreign_item: IsForeignItem(DefId) -> bool,
/// True if this is a default impl (aka impl Foo for ..)
[] is_default_impl: ItemSignature(DefId) -> bool,
[] is_default_impl: IsDefaultImpl(DefId) -> bool,
/// Get a map with the variance of every item; use `item_variance`
/// instead.
......@@ -845,8 +872,8 @@ fn default() -> Self {
/// Maps from a trait item to the trait item "descriptor"
[] associated_item: AssociatedItems(DefId) -> ty::AssociatedItem,
[] impl_trait_ref: ItemSignature(DefId) -> Option<ty::TraitRef<'tcx>>,
[] impl_polarity: ItemSignature(DefId) -> hir::ImplPolarity,
[] impl_trait_ref: ImplTraitRef(DefId) -> Option<ty::TraitRef<'tcx>>,
[] impl_polarity: ImplPolarity(DefId) -> hir::ImplPolarity,
/// Maps a DefId of a type to a list of its inherent impls.
/// Contains implementations of methods that are inherent to a type.
......@@ -861,36 +888,36 @@ fn default() -> Self {
/// Maps DefId's that have an associated Mir to the result
/// of the MIR qualify_consts pass. The actual meaning of
/// the value isn't known except to the pass itself.
[] mir_const_qualif: Mir(DefId) -> u8,
[] mir_const_qualif: MirConstQualif(DefId) -> u8,
/// Fetch the MIR for a given def-id up till the point where it is
/// ready for const evaluation.
///
/// See the README for the `mir` module for details.
[] mir_const: Mir(DefId) -> &'tcx Steal<mir::Mir<'tcx>>,
[] mir_const: MirConst(DefId) -> &'tcx Steal<mir::Mir<'tcx>>,
[] mir_validated: Mir(DefId) -> &'tcx Steal<mir::Mir<'tcx>>,
[] mir_validated: MirValidated(DefId) -> &'tcx Steal<mir::Mir<'tcx>>,
/// MIR after our optimization passes have run. This is MIR that is ready
/// for trans. This is also the only query that can fetch non-local MIR, at present.
[] optimized_mir: Mir(DefId) -> &'tcx mir::Mir<'tcx>,
[] optimized_mir: MirOptimized(DefId) -> &'tcx mir::Mir<'tcx>,
/// Type of each closure. The def ID is the ID of the
/// expression defining the closure.
[] closure_kind: ItemSignature(DefId) -> ty::ClosureKind,
[] closure_kind: ClosureKind(DefId) -> ty::ClosureKind,
/// The signature of functions and closures.
[] fn_sig: ItemSignature(DefId) -> ty::PolyFnSig<'tcx>,
[] fn_sig: FnSignature(DefId) -> ty::PolyFnSig<'tcx>,
/// Caches CoerceUnsized kinds for impls on custom types.
[] coerce_unsized_info: ItemSignature(DefId)
[] coerce_unsized_info: CoerceUnsizedInfo(DefId)
-> ty::adjustment::CoerceUnsizedInfo,
[] typeck_item_bodies: typeck_item_bodies_dep_node(CrateNum) -> CompileResult,
[] typeck_tables_of: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>,
[] has_typeck_tables: TypeckTables(DefId) -> bool,
[] has_typeck_tables: HasTypeckTables(DefId) -> bool,
[] coherent_trait: coherent_trait_dep_node((CrateNum, DefId)) -> (),
......@@ -972,80 +999,81 @@ fn default() -> Self {
[] extern_crate: ExternCrate(DefId) -> Rc<Option<ExternCrate>>,
}
fn type_param_predicates((item_id, param_id): (DefId, DefId)) -> DepConstructor {
fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> {
DepConstructor::TypeParamPredicates {
item_id,
param_id
}
}
fn coherent_trait_dep_node((_, def_id): (CrateNum, DefId)) -> DepConstructor {
fn coherent_trait_dep_node<'tcx>((_, def_id): (CrateNum, DefId)) -> DepConstructor<'tcx> {
DepConstructor::CoherenceCheckTrait(def_id)
}
fn crate_inherent_impls_dep_node(_: CrateNum) -> DepConstructor {
fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::Coherence
}
fn reachability_dep_node(_: CrateNum) -> DepConstructor {
fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::Reachability
}
fn mir_shim_dep_node(instance: ty::InstanceDef) -> DepConstructor {
instance.dep_node()
fn mir_shim_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) -> DepConstructor<'tcx> {
DepConstructor::MirShim {
instance_def
}
}
fn symbol_name_dep_node(instance: ty::Instance) -> DepConstructor {
// symbol_name uses the substs only to traverse them to find the
// hash, and that does not create any new dep-nodes.
DepConstructor::SymbolName(instance.def.def_id())
fn symbol_name_dep_node<'tcx>(instance: ty::Instance<'tcx>) -> DepConstructor<'tcx> {
DepConstructor::InstanceSymbolName { instance }
}
fn typeck_item_bodies_dep_node(_: CrateNum) -> DepConstructor {
fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::TypeckBodiesKrate
}
fn const_eval_dep_node((def_id, _): (DefId, &Substs)) -> DepConstructor {
DepConstructor::ConstEval(def_id)
fn const_eval_dep_node<'tcx>((def_id, substs): (DefId, &'tcx Substs<'tcx>))
-> DepConstructor<'tcx> {
DepConstructor::ConstEval { def_id, substs }
}
fn mir_keys(_: CrateNum) -> DepConstructor {
fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::MirKeys
}
fn crate_variances(_: CrateNum) -> DepConstructor {
fn crate_variances<'tcx>(_: CrateNum) -> DepConstructor<'tcx> {
DepConstructor::CrateVariances
}
fn relevant_trait_impls_for((def_id, _): (DefId, SimplifiedType)) -> DepConstructor {
DepConstructor::TraitImpls(def_id)
fn relevant_trait_impls_for<'tcx>((def_id, t): (DefId, SimplifiedType)) -> DepConstructor<'tcx> {
DepConstructor::RelevantTraitImpls(def_id, t)
}
fn is_copy_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
fn is_copy_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::IsCopy(def_id)
}
fn is_sized_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
fn is_sized_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::IsSized(def_id)
}
fn is_freeze_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
fn is_freeze_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::IsFreeze(def_id)
}
fn needs_drop_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
fn needs_drop_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::NeedsDrop(def_id)
}
fn layout_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor {
fn layout_dep_node<'tcx>(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> {
let def_id = ty::item_path::characteristic_def_id_of_type(key.value)
.unwrap_or(DefId::local(CRATE_DEF_INDEX));
DepConstructor::Layout(def_id)
......
......@@ -189,7 +189,8 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
&serialized_dep_graph.nodes,
&dirty_raw_nodes,
&mut clean_work_products,
&mut dirty_work_products);
&mut dirty_work_products,
&work_products);
}
}
......@@ -201,11 +202,7 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
clean_work_products.insert(wp_id);
}
tcx.dep_graph.with_task(*bootstrap_output, (), (), create_node);
fn create_node((): (), (): ()) {
// just create the node with no inputs
}
tcx.dep_graph.add_node_directly(*bootstrap_output);
}
// Add in work-products that are still clean, and delete those that are
......@@ -394,7 +391,8 @@ fn process_edge<'a, 'tcx, 'edges>(
nodes: &IndexVec<DepNodeIndex, DepNode>,
dirty_raw_nodes: &DirtyNodes,
clean_work_products: &mut FxHashSet<WorkProductId>,
dirty_work_products: &mut FxHashSet<WorkProductId>)
dirty_work_products: &mut FxHashSet<WorkProductId>,
work_products: &[SerializedWorkProduct])
{
// If the target is dirty, skip the edge. If this is an edge
// that targets a work-product, we can print the blame
......@@ -418,9 +416,11 @@ fn process_edge<'a, 'tcx, 'edges>(
format!("{:?}", blame)
};
eprintln!("incremental: module {:?} is dirty because {:?} \
changed or was removed",
wp_id,
let wp = work_products.iter().find(|swp| swp.id == wp_id).unwrap();
eprintln!("incremental: module {:?} is dirty because \
{:?} changed or was removed",
wp.work_product.cgu_name,
blame_str);
}
}
......@@ -449,8 +449,7 @@ fn process_edge<'a, 'tcx, 'edges>(
if !dirty_raw_nodes.contains_key(&target) {
let target = nodes[target];
let source = nodes[source];
let _task = tcx.dep_graph.in_task(target);
tcx.dep_graph.read(source);
tcx.dep_graph.add_edge_directly(source, target);
if let DepKind::WorkProduct = target.kind {
let wp_id = WorkProductId::from_fingerprint(target.hash);
......@@ -458,4 +457,3 @@ fn process_edge<'a, 'tcx, 'edges>(
}
}
}
......@@ -55,6 +55,7 @@ pub fn save_trans_partition(sess: &Session,
};
let work_product = WorkProduct {
cgu_name: cgu_name.to_string(),
input_hash: partition_hash,
saved_files: saved_files,
};
......
......@@ -12,7 +12,7 @@
use encoder;
use schema;
use rustc::dep_graph::DepTrackingMapConfig;
use rustc::ty::maps::QueryConfig;
use rustc::middle::cstore::{CrateStore, CrateSource, LibSource, DepKind,
NativeLibrary, MetadataLoader, LinkMeta,
LinkagePreference, LoadedMacro, EncodedMetadata};
......@@ -45,7 +45,7 @@
pub fn provide<$lt>(providers: &mut Providers<$lt>) {
$(fn $name<'a, $lt:$lt>($tcx: TyCtxt<'a, $lt, $lt>, $def_id: DefId)
-> <ty::queries::$name<$lt> as
DepTrackingMapConfig>::Value {
QueryConfig>::Value {
assert!(!$def_id.is_local());
let def_path_hash = $tcx.def_path_hash($def_id);
......
......@@ -1120,7 +1120,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
.into_iter()
.map(|cgu| {
let dep_node = cgu.work_product_dep_node();
let (stats, module) =
let ((stats, module), _) =
tcx.dep_graph.with_task(dep_node,
AssertDepGraphSafe(&shared_ccx),
AssertDepGraphSafe(cgu),
......
......@@ -270,6 +270,14 @@ pub fn partition<'a, 'tcx, I>(scx: &SharedCrateContext<'a, 'tcx>,
(&cgu1.name[..]).cmp(&cgu2.name[..])
});
if scx.sess().opts.enable_dep_node_debug_strs() {
for cgu in &result {
let dep_node = cgu.work_product_dep_node();
scx.tcx().dep_graph.register_dep_node_debug_str(dep_node,
|| cgu.name().to_string());
}
}
result
}
......
......@@ -34,54 +34,64 @@ struct WontChange {
mod signatures {
use WillChange;
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR no path
#[rustc_then_this_would_need(AssociatedItems)] //~ ERROR no path
#[rustc_then_this_would_need(TraitDefOfItem)] //~ ERROR no path
trait Bar {
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
fn do_something(x: WillChange);
}
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
fn some_fn(x: WillChange) { }
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
fn new_foo(x: u32, y: u32) -> WillChange {
WillChange { x: x, y: y }
}
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR OK
impl WillChange {
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
fn new(x: u32, y: u32) -> WillChange { loop { } }
}
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR OK
impl WillChange {
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
fn method(&self, x: u32) { }
}
struct WillChanges {
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR OK
x: WillChange,
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR OK
y: WillChange
}
// The fields change, not the type itself.
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR no path
fn indirect(x: WillChanges) { }
}
mod invalid_signatures {
use WontChange;
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR no path
trait A {
#[rustc_then_this_would_need(FnSignature)] //~ ERROR no path
fn do_something_else_twice(x: WontChange);
}
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(FnSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR no path
fn b(x: WontChange) { }
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path from `WillChange`
#[rustc_then_this_would_need(FnSignature)] //~ ERROR no path from `WillChange`
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR no path from `WillChange`
fn c(x: u32) { }
}
......@@ -25,40 +25,42 @@ fn main() { }
// The type alias directly affects the type of the field,
// not the enclosing struct:
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR no path
struct Struct {
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR OK
x: TypeAlias,
y: u32
}
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR no path
enum Enum {
Variant1 {
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR OK
t: TypeAlias
},
Variant2(i32)
}
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR no path
trait Trait {
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
fn method(&self, _: TypeAlias);
}
struct SomeType;
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR no path
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR no path
impl SomeType {
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
fn method(&self, _: TypeAlias) {}
}
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeOfItem)] //~ ERROR OK
type TypeAlias2 = TypeAlias;
#[rustc_then_this_would_need(ItemSignature)] //~ ERROR OK
#[rustc_then_this_would_need(FnSignature)] //~ ERROR OK
#[rustc_then_this_would_need(TypeckTables)] //~ ERROR OK
fn function(_: TypeAlias) {
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register or sign in