提交 acb73dbe 编写于 作者: B bors

Auto merge of #44772 - michaelwoerister:new-graph, r=nikomatsakis

incr.comp.: Add new DepGraph implementation.

This commit does a few things:
1. It adds the new dep-graph implementation -- *in addition* to the old one. This way we can start testing the new implementation without switching all tests at once.
2. It persists the new dep-graph (which includes query result fingerprints) to the incr. comp. caching directory and also loads this data.
3. It removes support for loading fingerprints of metadata imported from other crates (except for when running autotests). This is not needed anymore with red/green. It could provide a performance advantage but that's yet to be determined. For now, as red/green is not fully implemented yet, the cross-crate incremental tests are disabled.

Note, this PR is based on top of soon-to-be-merged #44696 and only the last 4 commits are new:
```
- incr.comp.: Initial implemenation of append-only dep-graph. (c90147c)
- incr.comp.: Do some various cleanup. (8ce20c5)
- incr.comp.: Serialize and deserialize new DepGraph. (0e13c1a)
- incr.comp.: Remove support for loading metadata fingerprints. (270a134)
EDIT 2:
- incr.comp.: Make #[rustc_dirty/clean] test for fingerprint equality ... (d8f7ff9)
```
(EDIT: GH displays the commits in the wrong order for some reason)

Also note that this PR is expected to certainly result in performance regressions in the incr. comp. test cases, since we are adding quite a few things (a whole additional dep-graph, for example) without removing anything. End-to-end performance measurements will only make sense again after red/green is enabled and all the legacy tracking has been turned off.

EDIT 2: Pushed another commit that makes the `#[rustc_dirty]`/`#[rustc_clean]` based autotests compare query result fingerprints instead of testing `DepNode` existence.
......@@ -60,7 +60,7 @@
//! user of the `DepNode` API of having to know how to compute the expected
//! fingerprint for a given set of node parameters.
use hir::def_id::{CrateNum, DefId, DefIndex};
use hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX};
use hir::map::DefPathHash;
use hir::{HirId, ItemLocalId};
......@@ -420,7 +420,7 @@ pub fn fingerprint_needed_for_crate_hash(self) -> bool {
[input] Hir(DefId),
// Represents metadata from an extern crate.
[input] MetaData(DefId),
[input] CrateMetadata(CrateNum),
// Represents some artifact that we save to disk. Note that these
// do not have a def-id as part of their identifier.
......@@ -678,6 +678,22 @@ fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String {
}
}
// Lets a `CrateNum` act as the parameter of a dep-node. The fingerprint is
// the def-path hash of the crate's root module (`CRATE_DEF_INDEX`), so the
// crate's query key can be reconstructed from the node.
impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (CrateNum,) {
const CAN_RECONSTRUCT_QUERY_KEY: bool = true;
fn to_fingerprint(&self, tcx: TyCtxt) -> Fingerprint {
// Identify the crate via the DefId of its root module.
let def_id = DefId {
krate: self.0,
index: CRATE_DEF_INDEX,
};
tcx.def_path_hash(def_id).0
}
fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String {
tcx.crate_name(self.0).as_str().to_string()
}
}
impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for (DefId, DefId) {
const CAN_RECONSTRUCT_QUERY_KEY: bool = false;
......
......@@ -17,7 +17,7 @@
use super::{DepGraphQuery, DepKind, DepNode};
use super::debug::EdgeFilter;
pub struct DepGraphEdges {
pub(super) struct DepGraphEdges {
nodes: Vec<DepNode>,
indices: FxHashMap<DepNode, DepNodeIndex>,
edges: FxHashSet<(DepNodeIndex, DepNodeIndex)>,
......@@ -31,8 +31,8 @@ pub struct DepGraphEdges {
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DepNodeIndex {
index: u32
pub(super) struct DepNodeIndex {
index: u32,
}
impl DepNodeIndex {
......
......@@ -8,11 +8,13 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
StableHashingContextProvider};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use session::config::OutputType;
use std::cell::{Ref, RefCell};
use std::hash::Hash;
use std::rc::Rc;
use util::common::{ProfileQueriesMsg, profq_msg};
......@@ -22,7 +24,9 @@
use super::query::DepGraphQuery;
use super::raii;
use super::safe::DepGraphSafe;
use super::edges::{DepGraphEdges, DepNodeIndex};
use super::edges::{self, DepGraphEdges};
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
use super::prev::PreviousDepGraph;
#[derive(Clone)]
pub struct DepGraph {
......@@ -38,10 +42,38 @@ pub struct DepGraph {
fingerprints: Rc<RefCell<FxHashMap<DepNode, Fingerprint>>>
}
/// As a temporary measure, while transitioning to the new DepGraph
/// implementation, we maintain the old and the new dep-graph encoding in
/// parallel, so a DepNodeIndex actually contains two indices, one for each
/// version.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DepNodeIndex {
// Index into the old `DepGraphEdges` encoding.
legacy: edges::DepNodeIndex,
// Index into the new `CurrentDepGraph` encoding.
new: DepNodeIndexNew,
}

impl DepNodeIndex {
/// Sentinel pair returned when dep-graph tracking is disabled
/// (both halves are their respective `INVALID` values).
pub const INVALID: DepNodeIndex = DepNodeIndex {
legacy: edges::DepNodeIndex::INVALID,
new: DepNodeIndexNew::INVALID,
};
}
struct DepGraphData {
/// The actual graph data.
/// The old, initial encoding of the dependency graph. This will soon go
/// away.
edges: RefCell<DepGraphEdges>,
/// The new encoding of the dependency graph, optimized for red/green
/// tracking. The `current` field is the dependency graph of only the
/// current compilation session: We don't merge the previous dep-graph into
/// current one anymore.
current: RefCell<CurrentDepGraph>,
/// The dep-graph from the previous compilation session. It contains all
/// nodes and edges as well as all fingerprints of nodes that have them.
previous: PreviousDepGraph,
/// When we load, there may be `.o` files, cached mir, or other such
/// things available to us. If we find that they are not dirty, we
/// load the path to the file storing those work-products here into
......@@ -55,18 +87,24 @@ struct DepGraphData {
}
impl DepGraph {
pub fn new(enabled: bool) -> DepGraph {
pub fn new(prev_graph: PreviousDepGraph) -> DepGraph {
DepGraph {
data: if enabled {
Some(Rc::new(DepGraphData {
previous_work_products: RefCell::new(FxHashMap()),
work_products: RefCell::new(FxHashMap()),
edges: RefCell::new(DepGraphEdges::new()),
dep_node_debug: RefCell::new(FxHashMap()),
}))
} else {
None
},
data: Some(Rc::new(DepGraphData {
previous_work_products: RefCell::new(FxHashMap()),
work_products: RefCell::new(FxHashMap()),
edges: RefCell::new(DepGraphEdges::new()),
dep_node_debug: RefCell::new(FxHashMap()),
current: RefCell::new(CurrentDepGraph::new()),
previous: prev_graph,
})),
fingerprints: Rc::new(RefCell::new(FxHashMap())),
}
}
pub fn new_disabled() -> DepGraph {
DepGraph {
data: None,
fingerprints: Rc::new(RefCell::new(FxHashMap())),
}
}
......@@ -82,7 +120,8 @@ pub fn query(&self) -> DepGraphQuery {
}
pub fn in_ignore<'graph>(&'graph self) -> Option<raii::IgnoreTask<'graph>> {
self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.edges))
self.data.as_ref().map(|data| raii::IgnoreTask::new(&data.edges,
&data.current))
}
pub fn with_ignore<OP,R>(&self, op: OP) -> R
......@@ -130,6 +169,7 @@ pub fn with_task<C, A, R, HCX>(&self,
{
if let Some(ref data) = self.data {
data.edges.borrow_mut().push_task(key);
data.current.borrow_mut().push_task(key);
if cfg!(debug_assertions) {
profq_msg(ProfileQueriesMsg::TaskBegin(key.clone()))
};
......@@ -145,7 +185,9 @@ pub fn with_task<C, A, R, HCX>(&self,
if cfg!(debug_assertions) {
profq_msg(ProfileQueriesMsg::TaskEnd)
};
let dep_node_index = data.edges.borrow_mut().pop_task(key);
let dep_node_index_legacy = data.edges.borrow_mut().pop_task(key);
let dep_node_index_new = data.current.borrow_mut().pop_task(key);
let mut stable_hasher = StableHasher::new();
result.hash_stable(&mut hcx, &mut stable_hasher);
......@@ -155,7 +197,10 @@ pub fn with_task<C, A, R, HCX>(&self,
.insert(key, stable_hasher.finish())
.is_none());
(result, dep_node_index)
(result, DepNodeIndex {
legacy: dep_node_index_legacy,
new: dep_node_index_new,
})
} else {
if key.kind.fingerprint_needed_for_crate_hash() {
let mut hcx = cx.create_stable_hashing_context();
......@@ -180,9 +225,14 @@ pub fn with_anon_task<OP,R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeInde
{
if let Some(ref data) = self.data {
data.edges.borrow_mut().push_anon_task();
data.current.borrow_mut().push_anon_task();
let result = op();
let dep_node = data.edges.borrow_mut().pop_anon_task(dep_kind);
(result, dep_node)
let dep_node_index_legacy = data.edges.borrow_mut().pop_anon_task(dep_kind);
let dep_node_index_new = data.current.borrow_mut().pop_anon_task(dep_kind);
(result, DepNodeIndex {
legacy: dep_node_index_legacy,
new: dep_node_index_new,
})
} else {
(op(), DepNodeIndex::INVALID)
}
......@@ -192,13 +242,21 @@ pub fn with_anon_task<OP,R>(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeInde
/// Records that the currently open task reads dep-node `v`, in both the
/// legacy and the new encoding. In the new encoding the node must already
/// have been allocated (the `bug!` message indicates such nodes are
/// expected to be pre-allocated); hitting the `else` branch is a compiler bug.
pub fn read(&self, v: DepNode) {
if let Some(ref data) = self.data {
data.edges.borrow_mut().read(v);

let mut current = data.current.borrow_mut();
if let Some(&dep_node_index_new) = current.node_to_node_index.get(&v) {
current.read_index(dep_node_index_new);
} else {
bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind)
}
}
}
/// Records a read of an already-known node, forwarding the matching half of
/// the combined index to each of the two graph encodings. No-op when the
/// dep-graph is disabled (`self.data` is `None`).
#[inline]
pub fn read_index(&self, v: DepNodeIndex) {
if let Some(ref data) = self.data {
data.edges.borrow_mut().read_index(v.legacy);
data.current.borrow_mut().read_index(v.new);
}
}
......@@ -213,16 +271,12 @@ pub fn add_node_directly(&self, node: DepNode) {
self.data.as_ref().unwrap().edges.borrow_mut().add_node(node);
}
pub fn alloc_input_node(&self, node: DepNode) -> DepNodeIndex {
if let Some(ref data) = self.data {
data.edges.borrow_mut().add_node(node)
} else {
DepNodeIndex::INVALID
}
pub fn fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint {
self.fingerprints.borrow()[dep_node]
}
pub fn fingerprint_of(&self, dep_node: &DepNode) -> Option<Fingerprint> {
self.fingerprints.borrow().get(dep_node).cloned()
pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint {
self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
}
/// Indicates that a previous work product exists for `v`. This is
......@@ -291,6 +345,44 @@ pub fn register_dep_node_debug_str<F>(&self,
pub(super) fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
self.data.as_ref().and_then(|t| t.dep_node_debug.borrow().get(&dep_node).cloned())
}
/// Converts the current (new-encoding) dep-graph plus the collected query
/// result fingerprints into the flat `SerializedDepGraph` form used for
/// writing to disk. Panics if the dep-graph is disabled (`data` is `None`).
pub fn serialize(&self) -> SerializedDepGraph {
let fingerprints = self.fingerprints.borrow();
let current_dep_graph = self.data.as_ref().unwrap().current.borrow();

// Pair every node with its fingerprint; nodes for which no fingerprint
// was recorded get `Fingerprint::zero()`.
let nodes: IndexVec<_, _> = current_dep_graph.nodes.iter().map(|dep_node| {
let fingerprint = fingerprints.get(dep_node)
.cloned()
.unwrap_or(Fingerprint::zero());
(*dep_node, fingerprint)
}).collect();

let total_edge_count: usize = current_dep_graph.edges.iter()
.map(|v| v.len())
.sum();

// Flatten the per-node edge lists into one array; each node stores a
// [start, end) pair pointing into it.
let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
let mut edge_list_data = Vec::with_capacity(total_edge_count);

for (current_dep_node_index, edges) in current_dep_graph.edges.iter_enumerated() {
let start = edge_list_data.len() as u32;
// This should really just be a memcpy :/
edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex(i.index)));
let end = edge_list_data.len() as u32;

// Edge lists are emitted in node-index order, so the next slot in
// `edge_list_indices` must correspond to this node.
debug_assert_eq!(current_dep_node_index.index(), edge_list_indices.len());
edge_list_indices.push((start, end));
}

// Offsets are stored as u32, so the flattened list must fit.
debug_assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
debug_assert_eq!(edge_list_data.len(), total_edge_count);

SerializedDepGraph {
nodes,
edge_list_indices,
edge_list_data,
}
}
}
/// A "work product" is an intermediate result that we save into the
......@@ -335,3 +427,182 @@ pub struct WorkProduct {
/// Saved files associated with this CGU
pub saved_files: Vec<(OutputType, String)>,
}
/// The append-only dep-graph of the current compilation session, in the new
/// encoding. Nodes and their outgoing edge lists live in parallel
/// `IndexVec`s, with a map for `DepNode` -> index lookup.
pub(super) struct CurrentDepGraph {
nodes: IndexVec<DepNodeIndexNew, DepNode>,
edges: IndexVec<DepNodeIndexNew, Vec<DepNodeIndexNew>>,
node_to_node_index: FxHashMap<DepNode, DepNodeIndexNew>,
// Stack of currently executing tasks; reads are recorded into the
// topmost entry (see `read_index`).
task_stack: Vec<OpenTask>,
}
impl CurrentDepGraph {
/// Creates an empty graph with no open tasks.
fn new() -> CurrentDepGraph {
CurrentDepGraph {
nodes: IndexVec::new(),
edges: IndexVec::new(),
node_to_node_index: FxHashMap(),
task_stack: Vec::new(),
}
}

/// Opens an `Ignore` task: reads recorded while it is on top of the
/// stack are discarded (see `read_index`).
pub(super) fn push_ignore(&mut self) {
self.task_stack.push(OpenTask::Ignore);
}

/// Closes the topmost task, which must be an `Ignore` task.
pub(super) fn pop_ignore(&mut self) {
let popped_node = self.task_stack.pop().unwrap();
debug_assert_eq!(popped_node, OpenTask::Ignore);
}

/// Opens a regular task for dep-node `key`; subsequent reads become its
/// outgoing edges once the task is popped.
pub(super) fn push_task(&mut self, key: DepNode) {
self.task_stack.push(OpenTask::Regular {
node: key,
reads: Vec::new(),
read_set: FxHashSet(),
});
}

/// Closes the topmost task, which must be a regular task for `key`, and
/// allocates a graph node whose edges are the recorded reads.
pub(super) fn pop_task(&mut self, key: DepNode) -> DepNodeIndexNew {
let popped_node = self.task_stack.pop().unwrap();

if let OpenTask::Regular {
node,
read_set: _,
reads
} = popped_node {
debug_assert_eq!(node, key);
self.alloc_node(node, reads)
} else {
bug!("pop_task() - Expected regular task to be popped")
}
}

/// Opens an anonymous task; its dep-node is derived from its reads when
/// the task is popped (see `pop_anon_task`).
fn push_anon_task(&mut self) {
self.task_stack.push(OpenTask::Anon {
reads: Vec::new(),
read_set: FxHashSet(),
});
}

/// Closes the topmost task, which must be anonymous. A synthetic
/// `DepNode` of the given `kind` is built whose hash combines the kind
/// discriminants and hashes of all reads; if an identical node already
/// exists, its index is reused instead of allocating a duplicate.
fn pop_anon_task(&mut self, kind: DepKind) -> DepNodeIndexNew {
let popped_node = self.task_stack.pop().unwrap();

if let OpenTask::Anon {
read_set: _,
reads
} = popped_node {
let mut fingerprint = Fingerprint::zero();
let mut hasher = StableHasher::new();

for &read in reads.iter() {
let read_dep_node = self.nodes[read];
::std::mem::discriminant(&read_dep_node.kind).hash(&mut hasher);

// Fingerprint::combine() is faster than sending Fingerprint
// through the StableHasher (at least as long as StableHasher
// is so slow).
fingerprint = fingerprint.combine(read_dep_node.hash);
}

fingerprint = fingerprint.combine(hasher.finish());

let target_dep_node = DepNode {
kind,
hash: fingerprint,
};

if let Some(&index) = self.node_to_node_index.get(&target_dep_node) {
return index;
}

self.alloc_node(target_dep_node, reads)
} else {
bug!("pop_anon_task() - Expected anonymous task to be popped")
}
}

/// Records a read of `source` in the topmost open task. `read_set`
/// deduplicates, so each distinct source is recorded only once per task.
/// Reads under an `Ignore` task, or with no open task, are dropped.
fn read_index(&mut self, source: DepNodeIndexNew) {
match self.task_stack.last_mut() {
Some(&mut OpenTask::Regular {
ref mut reads,
ref mut read_set,
node: _,
}) => {
if read_set.insert(source) {
reads.push(source);
}
}
Some(&mut OpenTask::Anon {
ref mut reads,
ref mut read_set,
}) => {
if read_set.insert(source) {
reads.push(source);
}
}
Some(&mut OpenTask::Ignore) | None => {
// ignore
}
}
}

/// Appends `dep_node` with the given outgoing `edges` and returns its
/// index. The node must not already be present.
fn alloc_node(&mut self,
dep_node: DepNode,
edges: Vec<DepNodeIndexNew>)
-> DepNodeIndexNew {
// The three parallel structures must stay in sync.
debug_assert_eq!(self.edges.len(), self.nodes.len());
debug_assert_eq!(self.node_to_node_index.len(), self.nodes.len());
debug_assert!(!self.node_to_node_index.contains_key(&dep_node));

let dep_node_index = DepNodeIndexNew::new(self.nodes.len());
self.nodes.push(dep_node);
self.node_to_node_index.insert(dep_node, dep_node_index);
self.edges.push(edges);
dep_node_index
}
}
/// Index of a node in `CurrentDepGraph` (the new encoding), kept distinct
/// from the legacy `edges::DepNodeIndex` while both graphs are maintained.
/// Stored as a `u32`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub(super) struct DepNodeIndexNew {
index: u32,
}
/// Lets `DepNodeIndexNew` serve as the index type of an `IndexVec` by
/// delegating to the inherent constructor and accessor (inherent methods
/// take precedence over these trait methods, so there is no recursion).
impl Idx for DepNodeIndexNew {
fn new(idx: usize) -> Self {
Self::new(idx)
}

fn index(self) -> usize {
DepNodeIndexNew::index(self)
}
}
impl DepNodeIndexNew {
/// Sentinel value (`u32::MAX`) used when no real node index exists,
/// e.g. while dep-graph tracking is disabled.
const INVALID: DepNodeIndexNew = DepNodeIndexNew {
index: ::std::u32::MAX,
};

/// Creates an index from a `usize`, panicking if the value would be
/// truncated when stored as `u32`. Uses the same explicit comparison
/// as `SerializedDepNodeIndex::new` rather than the original
/// `(v & 0xFFFF_FFFF) == v` mask trick, which is harder to read and
/// degenerates to a tautology on 32-bit targets.
fn new(v: usize) -> DepNodeIndexNew {
assert!(v <= ::std::u32::MAX as usize);
DepNodeIndexNew { index: v as u32 }
}

/// Returns the index widened back to `usize`.
fn index(self) -> usize {
self.index as usize
}
}
/// An entry on the `CurrentDepGraph` task stack. While a task is open, the
/// node indices it reads are appended to `reads`; `read_set` mirrors `reads`
/// as a set so each distinct read is recorded only once (see `read_index`).
#[derive(Clone, Debug, PartialEq)]
enum OpenTask {
Regular {
node: DepNode,
reads: Vec<DepNodeIndexNew>,
read_set: FxHashSet<DepNodeIndexNew>,
},
// Anonymous tasks have no pre-determined DepNode; one is derived from
// the recorded reads when the task is popped (see `pop_anon_task`).
Anon {
reads: Vec<DepNodeIndexNew>,
read_set: FxHashSet<DepNodeIndexNew>,
},
// Reads recorded while an `Ignore` task is on top are discarded.
Ignore,
}
......@@ -13,19 +13,17 @@
mod dep_tracking_map;
mod edges;
mod graph;
mod prev;
mod query;
mod raii;
mod safe;
mod serialized;
pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig};
pub use self::dep_node::DepNode;
pub use self::dep_node::WorkProductId;
pub use self::graph::DepGraph;
pub use self::graph::WorkProduct;
pub use self::edges::DepNodeIndex;
pub use self::dep_node::{DepNode, DepKind, DepConstructor, WorkProductId};
pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex};
pub use self::prev::PreviousDepGraph;
pub use self::query::DepGraphQuery;
pub use self::safe::AssertDepGraphSafe;
pub use self::safe::DepGraphSafe;
pub use self::raii::DepTask;
pub use self::dep_node::{DepKind, DepConstructor};
pub use self::serialized::SerializedDepGraph;
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use ich::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use super::dep_node::DepNode;
use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex};
/// The dep-graph loaded from the previous compilation session, together
/// with a reverse index mapping each `DepNode` to its position in the
/// serialized data.
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct PreviousDepGraph {
data: SerializedDepGraph,
// Reverse lookup: DepNode -> index into `data.nodes`.
index: FxHashMap<DepNode, SerializedDepNodeIndex>,
}
impl PreviousDepGraph {
pub fn new(data: SerializedDepGraph) -> PreviousDepGraph {
let index: FxHashMap<_, _> = data.nodes
.iter_enumerated()
.map(|(idx, &(dep_node, _))| (dep_node, idx))
.collect();
PreviousDepGraph { data, index }
}
pub fn with_edges_from<F>(&self, dep_node: &DepNode, mut f: F)
where
F: FnMut(&(DepNode, Fingerprint)),
{
let node_index = self.index[dep_node];
self.data
.edge_targets_from(node_index)
.into_iter()
.for_each(|&index| f(&self.data.nodes[index]));
}
pub fn fingerprint_of(&self, dep_node: &DepNode) -> Fingerprint {
let node_index = self.index[dep_node];
self.data.nodes[node_index].1
}
}
......@@ -8,50 +8,33 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::DepNode;
use super::edges::DepGraphEdges;
use super::graph::CurrentDepGraph;
use std::cell::RefCell;
pub struct DepTask<'graph> {
graph: &'graph RefCell<DepGraphEdges>,
key: DepNode,
}
impl<'graph> DepTask<'graph> {
pub fn new(graph: &'graph RefCell<DepGraphEdges>,
key: DepNode)
-> DepTask<'graph> {
graph.borrow_mut().push_task(key);
DepTask {
graph,
key,
}
}
}
impl<'graph> Drop for DepTask<'graph> {
fn drop(&mut self) {
self.graph.borrow_mut().pop_task(self.key);
}
}
pub struct IgnoreTask<'graph> {
graph: &'graph RefCell<DepGraphEdges>,
legacy_graph: &'graph RefCell<DepGraphEdges>,
new_graph: &'graph RefCell<CurrentDepGraph>,
}
impl<'graph> IgnoreTask<'graph> {
pub fn new(graph: &'graph RefCell<DepGraphEdges>) -> IgnoreTask<'graph> {
graph.borrow_mut().push_ignore();
pub(super) fn new(legacy_graph: &'graph RefCell<DepGraphEdges>,
new_graph: &'graph RefCell<CurrentDepGraph>)
-> IgnoreTask<'graph> {
legacy_graph.borrow_mut().push_ignore();
new_graph.borrow_mut().push_ignore();
IgnoreTask {
graph
legacy_graph,
new_graph,
}
}
}
impl<'graph> Drop for IgnoreTask<'graph> {
fn drop(&mut self) {
self.graph.borrow_mut().pop_ignore();
self.legacy_graph.borrow_mut().pop_ignore();
self.new_graph.borrow_mut().pop_ignore();
}
}
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! The data that we will serialize and deserialize.
use dep_graph::DepNode;
use ich::Fingerprint;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
/// The index of a DepNode in the SerializedDepGraph::nodes array.
/// Stored as a `u32`.
#[derive(Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd, Debug,
RustcEncodable, RustcDecodable)]
pub struct SerializedDepNodeIndex(pub u32);
impl SerializedDepNodeIndex {
/// Creates an index from a `usize`, panicking if the value would be
/// truncated when stored as `u32`.
#[inline]
pub fn new(idx: usize) -> SerializedDepNodeIndex {
assert!(idx <= ::std::u32::MAX as usize);
SerializedDepNodeIndex(idx as u32)
}
}
// Lets SerializedDepNodeIndex serve as the index type of an IndexVec.
// Duplicates the range check from the inherent `new` above.
impl Idx for SerializedDepNodeIndex {
#[inline]
fn new(idx: usize) -> Self {
assert!(idx <= ::std::u32::MAX as usize);
SerializedDepNodeIndex(idx as u32)
}

#[inline]
fn index(self) -> usize {
self.0 as usize
}
}
/// Data for use when recompiling the **current crate**.
#[derive(Debug, RustcEncodable, RustcDecodable)]
pub struct SerializedDepGraph {
/// The set of all DepNodes in the graph, each paired with the
/// fingerprint recorded for its result (`Fingerprint::zero()` when no
/// fingerprint was available at serialization time).
pub nodes: IndexVec<SerializedDepNodeIndex, (DepNode, Fingerprint)>,

/// For each DepNode, stores the list of edges originating from that
/// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
/// which holds the actual DepNodeIndices of the target nodes.
pub edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)>,

/// A flattened list of all edge targets in the graph. Edge sources are
/// implicit in edge_list_indices.
pub edge_list_data: Vec<SerializedDepNodeIndex>,
}
impl SerializedDepGraph {
/// Creates a graph with no nodes and no edges.
pub fn new() -> SerializedDepGraph {
SerializedDepGraph {
nodes: IndexVec::new(),
edge_list_indices: IndexVec::new(),
edge_list_data: Vec::new(),
}
}

/// Returns the slice of `edge_list_data` holding the edge targets of
/// `source`, delimited by that node's `(start, end)` pair in
/// `edge_list_indices`.
pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] {
let (start, end) = self.edge_list_indices[source];
&self.edge_list_data[start as usize..end as usize]
}
}
......@@ -97,6 +97,17 @@ fn default_decode<D: Decoder>(d: &mut D) -> Result<CrateNum, D::Error> {
RustcDecodable, Hash, Copy)]
pub struct DefIndex(u32);
// Lets DefIndex serve as the index type of an IndexVec.
impl Idx for DefIndex {
fn new(value: usize) -> Self {
// NOTE(review): the strict `<` rejects u32::MAX itself, whereas the
// other u32-backed index types here (e.g. SerializedDepNodeIndex)
// use `<=` — confirm whether u32::MAX is deliberately reserved.
assert!(value < (u32::MAX) as usize);
DefIndex(value as u32)
}

fn index(self) -> usize {
self.0 as usize
}
}
impl fmt::Debug for DefIndex {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
......
......@@ -88,6 +88,15 @@ pub(super) fn root(krate: &'hir Crate,
).1;
}
{
dep_graph.with_task(
DepNode::new_no_params(DepKind::AllLocalTraitImpls),
&hcx,
&krate.trait_impls,
identity_fn
);
}
let hir_body_nodes = vec![root_mod_def_path_hash];
let mut collector = NodeCollector {
......
......@@ -876,17 +876,7 @@ pub fn span(&self, id: NodeId) -> Span {
Some(RootCrate(_)) => self.forest.krate.span,
Some(NotPresent) | None => {
// Some nodes, notably macro definitions, are not
// present in the map for whatever reason, but
// they *do* have def-ids. So if we encounter an
// empty hole, check for that case.
if let Some(def_index) = self.definitions.opt_def_index(id) {
let def_path_hash = self.definitions.def_path_hash(def_index);
self.dep_graph.read(def_path_hash.to_dep_node(DepKind::Hir));
DUMMY_SP
} else {
bug!("hir::map::Map::span: id not in map: {:?}", id)
}
bug!("hir::map::Map::span: id not in map: {:?}", id)
}
}
}
......
......@@ -267,6 +267,8 @@ pub trait CrateStore {
fn export_macros_untracked(&self, cnum: CrateNum);
fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind;
fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol;
fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> Symbol;
fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh;
fn struct_field_names_untracked(&self, def: DefId) -> Vec<ast::Name>;
fn item_children_untracked(&self, did: DefId, sess: &Session) -> Vec<def::Export>;
fn load_macro_untracked(&self, did: DefId, sess: &Session) -> LoadedMacro;
......@@ -336,6 +338,10 @@ fn associated_item_cloned_untracked(&self, def: DefId) -> ty::AssociatedItem
fn dep_kind_untracked(&self, cnum: CrateNum) -> DepKind { bug!("is_explicitly_linked") }
fn export_macros_untracked(&self, cnum: CrateNum) { bug!("export_macros") }
fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol { bug!("crate_name") }
fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> Symbol {
bug!("crate_disambiguator")
}
fn crate_hash_untracked(&self, cnum: CrateNum) -> Svh { bug!("crate_hash") }
// resolve
fn def_key(&self, def: DefId) -> DefKey { bug!("def_key") }
......
......@@ -1021,7 +1021,7 @@ fn parse_optimization_fuel(slot: &mut Option<(String, u64)>, v: Option<&str>) ->
"attempt to recover from parse errors (experimental)"),
incremental: Option<String> = (None, parse_opt_string, [UNTRACKED],
"enable incremental compilation (experimental)"),
incremental_cc: bool = (true, parse_bool, [UNTRACKED],
incremental_cc: bool = (false, parse_bool, [UNTRACKED],
"enable cross-crate incremental compilation (even more experimental)"),
incremental_info: bool = (false, parse_bool, [UNTRACKED],
"print high-level information about incremental reuse (or the lack thereof)"),
......
......@@ -11,6 +11,7 @@
//! type context book-keeping
use dep_graph::DepGraph;
use dep_graph::{DepNode, DepConstructor};
use errors::DiagnosticBuilder;
use session::Session;
use session::config::OutputFilenames;
......@@ -1237,6 +1238,25 @@ pub fn create_stable_hashing_context(self) -> StableHashingContext<'gcx> {
self.cstore)
}
// This method makes sure that we have a DepNode and a Fingerprint for
// every upstream crate. It needs to be called once right after the tcx is
// created.
// With full-fledged red/green, the method will probably become unnecessary
// as this will be done on-demand.
pub fn allocate_metadata_dep_nodes(self) {
// We cannot use the query versions of crates() and crate_hash(), since
// those would need the DepNodes that we are allocating here.
for cnum in self.cstore.crates_untracked() {
let dep_node = DepNode::new(self, DepConstructor::CrateMetadata(cnum));
let crate_hash = self.cstore.crate_hash_untracked(cnum);
// Running the crate hash through with_task creates the
// CrateMetadata node and records a fingerprint for it.
self.dep_graph.with_task(dep_node,
self,
crate_hash,
|_, x| x // No transformation needed
);
}
}
// This method exercises the `in_scope_traits_map` query for all possible
// values so that we have their fingerprints available in the DepGraph.
// This is only required as long as we still use the old dependency tracking
......
......@@ -643,7 +643,16 @@ pub fn phase_2_configure_and_expand<F>(sess: &Session,
&crate_name,
&disambiguator.as_str(),
);
let dep_graph = DepGraph::new(sess.opts.build_dep_graph());
let dep_graph = if sess.opts.build_dep_graph() {
let prev_dep_graph = time(time_passes, "load prev dep-graph (new)", || {
rustc_incremental::load_dep_graph_new(sess)
});
DepGraph::new(prev_dep_graph)
} else {
DepGraph::new_disabled()
};
time(time_passes, "recursion limit", || {
middle::recursion_limit::update_limits(sess, &krate);
......@@ -713,7 +722,6 @@ pub fn phase_2_configure_and_expand<F>(sess: &Session,
// item, much like we do for macro expansion. In other words, the hash reflects not just
// its contents but the results of name resolution on those contents. Hopefully we'll push
// this back at some point.
let _ignore = dep_graph.in_ignore();
let mut crate_loader = CrateLoader::new(sess, &cstore, crate_name);
let resolver_arenas = Resolver::arenas();
let mut resolver = Resolver::new(sess,
......
......@@ -20,7 +20,6 @@
use rustc::ty::{self, TyCtxt, GlobalArenas, Resolutions};
use rustc::cfg;
use rustc::cfg::graphviz::LabelledCFG;
use rustc::dep_graph::DepGraph;
use rustc::middle::cstore::CrateStore;
use rustc::session::Session;
use rustc::session::config::{Input, OutputFilenames};
......@@ -848,9 +847,6 @@ pub fn print_after_parsing(sess: &Session,
krate: &ast::Crate,
ppm: PpMode,
ofile: Option<&Path>) {
let dep_graph = DepGraph::new(false);
let _ignore = dep_graph.in_ignore();
let (src, src_name) = get_source(input, sess);
let mut rdr = &*src;
......@@ -893,9 +889,6 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session,
output_filenames: &OutputFilenames,
opt_uii: Option<UserIdentifiedItem>,
ofile: Option<&Path>) {
let dep_graph = DepGraph::new(false);
let _ignore = dep_graph.in_ignore();
if ppm.needs_analysis() {
print_with_analysis(sess,
cstore,
......
......@@ -32,6 +32,7 @@
pub use assert_dep_graph::assert_dep_graph;
pub use persist::load_dep_graph;
pub use persist::load_dep_graph_new;
pub use persist::save_dep_graph;
pub use persist::save_trans_partition;
pub use persist::save_work_products;
......
......@@ -8,18 +8,17 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Debugging code to test the state of the dependency graph just
//! after it is loaded from disk and just after it has been saved.
//! Debugging code to test fingerprints computed for query results.
//! For each node marked with `#[rustc_clean]` or `#[rustc_dirty]`,
//! we will check that a suitable node for that item either appears
//! or does not appear in the dep-graph, as appropriate:
//! we will compare the fingerprint from the current and from the previous
//! compilation session as appropriate:
//!
//! - `#[rustc_dirty(label="TypeckTables", cfg="rev2")]` if we are
//! in `#[cfg(rev2)]`, then there MUST NOT be a node
//! `DepNode::TypeckTables(X)` where `X` is the def-id of the
//! current node.
//! in `#[cfg(rev2)]`, then the fingerprints associated with
//! `DepNode::TypeckTables(X)` must be DIFFERENT (`X` is the def-id of the
//! current node).
//! - `#[rustc_clean(label="TypeckTables", cfg="rev2")]` same as above,
//! except that the node MUST exist.
//! except that the fingerprints must be the SAME.
//!
//! Errors are reported if we are in the suitable configuration but
//! the required condition is not met.
......@@ -40,9 +39,7 @@
//! previous revision to compare things to.
//!
use super::data::DepNodeIndex;
use super::load::DirtyNodes;
use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind};
use rustc::dep_graph::DepNode;
use rustc::hir;
use rustc::hir::def_id::DefId;
use rustc::hir::itemlikevisit::ItemLikeVisitor;
......@@ -51,41 +48,22 @@
ATTR_CLEAN_METADATA};
use syntax::ast::{self, Attribute, NestedMetaItem};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use rustc_data_structures::indexed_vec::IndexVec;
use syntax_pos::Span;
use rustc::ty::TyCtxt;
const LABEL: &'static str = "label";
const CFG: &'static str = "cfg";
pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
nodes: &IndexVec<DepNodeIndex, DepNode>,
dirty_inputs: &DirtyNodes) {
pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
// can't add `#[rustc_dirty]` etc without opting in to this feature
if !tcx.sess.features.borrow().rustc_attrs {
return;
}
let _ignore = tcx.dep_graph.in_ignore();
let dirty_inputs: FxHashSet<DepNode> =
dirty_inputs.keys()
.filter_map(|dep_node_index| {
let dep_node = nodes[*dep_node_index];
if dep_node.extract_def_id(tcx).is_some() {
Some(dep_node)
} else {
None
}
})
.collect();
let query = tcx.dep_graph.query();
debug!("query-nodes: {:?}", query.nodes());
let krate = tcx.hir.krate();
let mut dirty_clean_visitor = DirtyCleanVisitor {
tcx,
query: &query,
dirty_inputs,
checked_attrs: FxHashSet(),
};
krate.visit_all_item_likes(&mut dirty_clean_visitor);
......@@ -105,8 +83,6 @@ pub fn check_dirty_clean_annotations<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
pub struct DirtyCleanVisitor<'a, 'tcx:'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
query: &'a DepGraphQuery,
dirty_inputs: FxHashSet<DepNode>,
checked_attrs: FxHashSet<ast::AttrId>,
}
......@@ -143,59 +119,28 @@ fn dep_node_str(&self, dep_node: &DepNode) -> String {
/// Emits a `span_err` unless `dep_node` is considered dirty.
/// NOTE(review): this span is commit-diff residue. It contains both the
/// removed, existence-based check (the `match` below, whose closing brace
/// was deleted) and the newly added fingerprint-based check back to back,
/// so as shown it is not valid Rust on its own.
fn assert_dirty(&self, item_span: Span, dep_node: DepNode) {
debug!("assert_dirty({:?})", dep_node);
// Old scheme (removed in this commit): dirtiness judged by membership in
// the dirty-input set or the dep-graph query.
match dep_node.kind {
DepKind::Krate |
DepKind::Hir |
DepKind::HirBody => {
// HIR nodes are inputs, so if we are asserting that the HIR node is
// dirty, we check the dirty input set.
if !self.dirty_inputs.contains(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
&format!("`{}` not found in dirty set, but should be dirty",
dep_node_str));
}
}
_ => {
// Other kinds of nodes would be targets, so check if
// the dep-graph contains the node.
if self.query.contains_node(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
&format!("`{}` found in dep graph, but should be dirty", dep_node_str));
}
}
// New scheme (added in this commit): dirty means the query-result
// fingerprint changed relative to the previous session.
let current_fingerprint = self.tcx.dep_graph.fingerprint_of(&dep_node);
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);
if current_fingerprint == prev_fingerprint {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
&format!("`{}` should be dirty but is not", dep_node_str));
}
}
/// Emits a `span_err` unless `dep_node` is considered clean.
/// NOTE(review): like `assert_dirty`, this span is commit-diff residue —
/// the removed existence-based check (unclosed `match`) and the added
/// fingerprint-based check appear back to back; not valid Rust as shown.
fn assert_clean(&self, item_span: Span, dep_node: DepNode) {
debug!("assert_clean({:?})", dep_node);
// Old scheme (removed in this commit).
match dep_node.kind {
DepKind::Krate |
DepKind::Hir |
DepKind::HirBody => {
// For HIR nodes, check the inputs.
if self.dirty_inputs.contains(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
&format!("`{}` found in dirty-node set, but should be clean",
dep_node_str));
}
}
_ => {
// Otherwise, check if the dep-node exists.
if !self.query.contains_node(&dep_node) {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
&format!("`{}` not found in dep graph, but should be clean",
dep_node_str));
}
}
// New scheme (added in this commit): clean means the fingerprint is
// unchanged relative to the previous session.
let current_fingerprint = self.tcx.dep_graph.fingerprint_of(&dep_node);
let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node);
if current_fingerprint != prev_fingerprint {
let dep_node_str = self.dep_node_str(&dep_node);
self.tcx.sess.span_err(
item_span,
&format!("`{}` should be clean but is not", dep_node_str));
}
}
......
......@@ -114,15 +114,12 @@
//! unsupported file system and emit a warning in that case. This is not yet
//! implemented.
use rustc::hir::def_id::CrateNum;
use rustc::hir::svh::Svh;
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc::util::fs as fs_util;
use rustc_data_structures::{flock, base_n};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
use std::ffi::OsString;
use std::fs as std_fs;
use std::io;
use std::mem;
......@@ -132,6 +129,7 @@
const LOCK_FILE_EXT: &'static str = ".lock";
const DEP_GRAPH_FILENAME: &'static str = "dep-graph.bin";
const DEP_GRAPH_NEW_FILENAME: &'static str = "dep-graph-new.bin";
const WORK_PRODUCTS_FILENAME: &'static str = "work-products.bin";
const METADATA_HASHES_FILENAME: &'static str = "metadata.bin";
......@@ -145,6 +143,10 @@ pub fn dep_graph_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME)
}
/// Path to the new (red/green) dep-graph file inside the current
/// session's incr. comp. directory.
pub fn dep_graph_path_new(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, DEP_GRAPH_NEW_FILENAME)
}
/// Path to the work-products file inside the current session's
/// incr. comp. directory.
pub fn work_products_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, WORK_PRODUCTS_FILENAME)
}
......@@ -153,10 +155,6 @@ pub fn metadata_hash_export_path(sess: &Session) -> PathBuf {
in_incr_comp_dir_sess(sess, METADATA_HASHES_FILENAME)
}
/// Path to the metadata-hashes file inside *another* crate's session
/// directory (used when importing that crate's hashes).
pub fn metadata_hash_import_path(import_session_dir: &Path) -> PathBuf {
import_session_dir.join(METADATA_HASHES_FILENAME)
}
pub fn lock_file_path(session_dir: &Path) -> PathBuf {
let crate_dir = session_dir.parent().unwrap();
......@@ -616,70 +614,6 @@ fn string_to_timestamp(s: &str) -> Result<SystemTime, ()> {
Ok(UNIX_EPOCH + duration)
}
/// Computes the incr. comp. crate directory for `cnum` from the crate's
/// name and disambiguator, as known to the `TyCtxt`.
fn crate_path_tcx(tcx: TyCtxt, cnum: CrateNum) -> PathBuf {
crate_path(tcx.sess, &tcx.crate_name(cnum).as_str(), &tcx.crate_disambiguator(cnum).as_str())
}
/// Finds the session directory containing the correct metadata hashes file for
/// the given crate. In order to do that it has to compute the crate directory
/// of the given crate, and in there, look for the session directory with the
/// correct SVH in it.
/// Note that we have to match on the exact SVH here, not just the
/// crate's (name, disambiguator) pair. The metadata hashes are only valid for
/// the exact version of the binary we are reading from now (i.e. the hashes
/// are part of the dependency graph of a specific compilation session).
pub fn find_metadata_hashes_for(tcx: TyCtxt, cnum: CrateNum) -> Option<PathBuf> {
let crate_directory = crate_path_tcx(tcx, cnum);
// No crate directory at all means there are no cached hashes.
if !crate_directory.exists() {
return None
}
let dir_entries = match crate_directory.read_dir() {
Ok(dir_entries) => dir_entries,
Err(e) => {
// Report the read failure, then behave as if no hashes exist.
tcx.sess
.err(&format!("incremental compilation: Could not read crate directory `{}`: {}",
crate_directory.display(), e));
return None
}
};
// Session directory names encode the crate's SVH in base-N; build the
// string to match against.
let target_svh = tcx.crate_hash(cnum);
let target_svh = base_n::encode(target_svh.as_u64(), INT_ENCODE_BASE);
// Search the (lossily decoded) directory-entry names for a finalized
// session directory carrying exactly this SVH.
let sub_dir = find_metadata_hashes_iter(&target_svh, dir_entries.filter_map(|e| {
e.ok().map(|e| e.file_name().to_string_lossy().into_owned())
}));
sub_dir.map(|sub_dir_name| crate_directory.join(&sub_dir_name))
}
/// Searches a stream of directory names for the first finalized session
/// directory whose name ends in `-<target_svh>`, returning that name.
fn find_metadata_hashes_iter<'a, I>(target_svh: &str, iter: I) -> Option<OsString>
    where I: Iterator<Item=String>
{
    // A candidate matches when the part after its last '-' equals the
    // target SVH; names without a dash are malformed and never match.
    let has_target_svh = |name: &String| {
        match name.rfind("-") {
            Some(last_dash_pos) => &name[last_dash_pos + 1..] == target_svh,
            None => false,
        }
    };
    iter.filter(|name| is_session_directory(name) && is_finalized(name))
        .find(has_target_svh)
        .map(OsString::from)
}
fn crate_path(sess: &Session,
crate_name: &str,
crate_disambiguator: &str)
......@@ -1019,52 +953,3 @@ fn test_find_source_directory_in_iter() {
PathBuf::from("crate-dir/s-1234-0000-working")].into_iter(), &already_visited),
None);
}
#[test]
fn test_find_metadata_hashes_iter()
{
    // Helper: run the search over a list of directory-name literals.
    let search = |svh: &str, names: &[&str]| {
        find_metadata_hashes_iter(svh, names.iter().map(|s| String::from(*s)))
    };

    // An exact SVH match on a finalized session directory is found.
    assert_eq!(search("testsvh2",
                      &["s-timestamp1-testsvh1",
                        "s-timestamp2-testsvh2",
                        "s-timestamp3-testsvh3"]),
               Some(OsString::from("s-timestamp2-testsvh2")));

    // Invalid directory names are skipped; the valid match is still found.
    assert_eq!(search("testsvh2",
                      &["s-timestamp1-testsvh1",
                        "s-timestamp2-testsvh2",
                        "invalid-name"]),
               Some(OsString::from("s-timestamp2-testsvh2")));

    // A "-working" (non-finalized) directory must not match.
    assert_eq!(search("testsvh2",
                      &["s-timestamp1-testsvh1",
                        "s-timestamp2-testsvh2-working",
                        "s-timestamp3-testsvh3"]),
               None);

    // Non-finalized directories never match, regardless of suffix.
    assert_eq!(search("testsvh1",
                      &["s-timestamp1-random1-working",
                        "s-timestamp2-random2-working",
                        "s-timestamp3-random3-working"]),
               None);

    // Names missing the session-directory prefix are rejected.
    assert_eq!(search("testsvh2",
                      &["timestamp1-testsvh2",
                        "timestamp2-testsvh2",
                        "timestamp3-testsvh2"]),
               None);
}
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::dep_graph::{DepNode, DepKind};
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::hir::svh::Svh;
use rustc::ich::Fingerprint;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::flock;
use rustc_serialize::Decodable;
use rustc_serialize::opaque::Decoder;
use super::data::*;
use super::fs::*;
use super::file_format;
use std::hash::Hash;
use std::fmt::Debug;
/// Computes and caches fingerprints for input dep-nodes, including metadata
/// hashes lazily loaded from other crates' session directories.
pub struct HashContext<'a, 'tcx: 'a> {
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
// Per-DefId metadata fingerprints, loaded on demand via `load_data`.
metadata_hashes: FxHashMap<DefId, Fingerprint>,
// Overall crate hash (SVH) per crate; used as a fallback fingerprint when
// no per-item hash is available.
crate_hashes: FxHashMap<CrateNum, Svh>,
}
impl<'a, 'tcx> HashContext<'a, 'tcx> {
/// Creates a context with empty caches.
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Self {
HashContext {
tcx,
metadata_hashes: FxHashMap(),
crate_hashes: FxHashMap(),
}
}
/// Returns the fingerprint for an *input* dep-node (HIR or foreign
/// metadata), or `None` for nodes that represent computed by-products.
pub fn hash(&mut self, dep_node: &DepNode) -> Option<Fingerprint> {
match dep_node.kind {
// HIR nodes (which always come from our crate) are an input:
DepKind::Krate |
DepKind::InScopeTraits |
DepKind::Hir |
DepKind::HirBody => {
Some(self.tcx.dep_graph.fingerprint_of(dep_node).unwrap())
}
// MetaData from other crates is an *input* to us.
// MetaData nodes from *our* crates are an *output*; we
// don't hash them, but we do compute a hash for them and
// save it for others to use.
DepKind::MetaData => {
let def_id = dep_node.extract_def_id(self.tcx).unwrap();
assert!(!def_id.is_local());
Some(self.metadata_hash(def_id,
def_id.krate,
|this| &mut this.metadata_hashes))
}
_ => {
// Other kinds of nodes represent computed by-products
// that we don't hash directly; instead, they should
// have some transitive dependency on a Hir or
// MetaData node, so we'll just hash that
None
}
}
}
/// Looks up (lazily loading, if necessary) the metadata fingerprint for
/// `key` from crate `cnum`. Falls back to a fingerprint derived from the
/// crate's overall SVH when no per-item hash was loaded.
fn metadata_hash<K, C>(&mut self,
key: K,
cnum: CrateNum,
cache: C)
-> Fingerprint
where K: Hash + Eq + Debug,
C: Fn(&mut Self) -> &mut FxHashMap<K, Fingerprint>,
{
debug!("metadata_hash(key={:?})", key);
debug_assert!(cnum != LOCAL_CRATE);
// This loop runs at most twice: the `assert!` after `load_data`
// guarantees the SVH-fallback branch hits on the second pass.
loop {
// check whether we have a result cached for this def-id
if let Some(&hash) = cache(self).get(&key) {
return hash;
}
// check whether we did not find detailed metadata for this
// krate; in that case, we just use the krate's overall hash
if let Some(&svh) = self.crate_hashes.get(&cnum) {
// micro-"optimization": avoid a cache miss if we ask
// for metadata from this particular def-id again.
let fingerprint = svh_to_fingerprint(svh);
cache(self).insert(key, fingerprint);
return fingerprint;
}
// otherwise, load the data and repeat.
self.load_data(cnum);
assert!(self.crate_hashes.contains_key(&cnum));
}
}
/// Records `cnum`'s SVH and, if a matching session directory exists on
/// disk, loads its per-item metadata hashes into `self.metadata_hashes`.
fn load_data(&mut self, cnum: CrateNum) {
debug!("load_data(cnum={})", cnum);
let svh = self.tcx.crate_hash(cnum);
let old = self.crate_hashes.insert(cnum, svh);
debug!("load_data: svh={}", svh);
assert!(old.is_none(), "loaded data for crate {:?} twice", cnum);
if let Some(session_dir) = find_metadata_hashes_for(self.tcx, cnum) {
debug!("load_data: session_dir={:?}", session_dir);
// Lock the directory we'll be reading the hashes from.
let lock_file_path = lock_file_path(&session_dir);
let _lock = match flock::Lock::new(&lock_file_path,
false, // don't wait
false, // don't create the lock-file
false) { // shared lock
Ok(lock) => lock,
Err(err) => {
debug!("Could not acquire lock on `{}` while trying to \
load metadata hashes: {}",
lock_file_path.display(),
err);
// Could not acquire the lock. The directory is probably in
// in the process of being deleted. It's OK to just exit
// here. It's the same scenario as if the file had not
// existed in the first place.
return
}
};
let hashes_file_path = metadata_hash_import_path(&session_dir);
match file_format::read_file(self.tcx.sess, &hashes_file_path)
{
Ok(Some(data)) => {
match self.load_from_data(cnum, &data, svh) {
Ok(()) => { }
Err(err) => {
// A decoding failure here is a compiler bug, not a
// user-facing error: the file-format check passed.
bug!("decoding error in dep-graph from `{}`: {}",
&hashes_file_path.display(), err);
}
}
}
Ok(None) => {
// If the file is not found, that's ok.
}
Err(err) => {
self.tcx.sess.err(
&format!("could not load dep information from `{}`: {}",
hashes_file_path.display(), err));
}
}
}
}
/// Decodes a metadata-hashes file, verifies its SVH matches
/// `expected_svh`, and records every per-`DefId` fingerprint it contains.
fn load_from_data(&mut self,
cnum: CrateNum,
data: &[u8],
expected_svh: Svh) -> Result<(), String> {
debug!("load_from_data(cnum={})", cnum);
// Load up the hashes for the def-ids from this crate.
let mut decoder = Decoder::new(data, 0);
let svh_in_hashes_file = Svh::decode(&mut decoder)?;
if svh_in_hashes_file != expected_svh {
// We should not be able to get here. If we do, then
// `fs::find_metadata_hashes_for()` has messed up.
bug!("mismatch between SVH in crate and SVH in incr. comp. hashes")
}
let serialized_hashes = SerializedMetadataHashes::decode(&mut decoder)?;
for serialized_hash in serialized_hashes.entry_hashes {
// the hashes are stored with just a def-index, which is
// always relative to the old crate; convert that to use
// our internal crate number
let def_id = DefId { krate: cnum, index: serialized_hash.def_index };
// record the hash for this dep-node
let old = self.metadata_hashes.insert(def_id, serialized_hash.hash);
debug!("load_from_data: def_id={:?} hash={}", def_id, serialized_hash.hash);
assert!(old.is_none(), "already have hash for {:?}", def_id);
}
Ok(())
}
}
/// Widens a crate SVH (a 64-bit hash) into a `Fingerprint` value.
fn svh_to_fingerprint(svh: Svh) -> Fingerprint {
    let hash64 = svh.as_u64();
    Fingerprint::from_smaller_hash(hash64)
}
......@@ -10,7 +10,7 @@
//! Code to save/load the dep-graph from files.
use rustc::dep_graph::{DepNode, WorkProductId, DepKind};
use rustc::dep_graph::{DepNode, WorkProductId, DepKind, PreviousDepGraph};
use rustc::hir::svh::Svh;
use rustc::ich::Fingerprint;
use rustc::session::Session;
......@@ -23,8 +23,6 @@
use std::path::{Path};
use super::data::*;
use super::dirty_clean;
use super::hash::*;
use super::fs::*;
use super::file_format;
use super::work_product;
......@@ -40,6 +38,7 @@
/// actually it doesn't matter all that much.) See `README.md` for
/// more general overview.
pub fn load_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
tcx.allocate_metadata_dep_nodes();
tcx.precompute_in_scope_traits_hashes();
if tcx.sess.incr_session_load_dep_graph() {
let _ignore = tcx.dep_graph.in_ignore();
......@@ -103,7 +102,7 @@ fn does_still_exist(tcx: TyCtxt, dep_node: &DepNode) -> bool {
DepKind::Hir |
DepKind::HirBody |
DepKind::InScopeTraits |
DepKind::MetaData => {
DepKind::CrateMetadata => {
dep_node.extract_def_id(tcx).is_some()
}
_ => {
......@@ -186,9 +185,6 @@ pub fn decode_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// dirty.
reconcile_work_products(tcx, work_products, &clean_work_products);
dirty_clean::check_dirty_clean_annotations(tcx,
&serialized_dep_graph.nodes,
&dirty_raw_nodes);
Ok(())
}
......@@ -198,15 +194,12 @@ fn initial_dirty_nodes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
nodes: &IndexVec<DepNodeIndex, DepNode>,
serialized_hashes: &[(DepNodeIndex, Fingerprint)])
-> DirtyNodes {
let mut hcx = HashContext::new(tcx);
let mut dirty_nodes = FxHashMap();
for &(dep_node_index, prev_hash) in serialized_hashes {
let dep_node = nodes[dep_node_index];
if does_still_exist(tcx, &dep_node) {
let current_hash = hcx.hash(&dep_node).unwrap_or_else(|| {
bug!("Cannot find current ICH for input that still exists?")
});
let current_hash = tcx.dep_graph.fingerprint_of(&dep_node);
if current_hash == prev_hash {
debug!("initial_dirty_nodes: {:?} is clean (hash={:?})",
......@@ -416,7 +409,7 @@ fn process_edge<'a, 'tcx, 'edges>(
// clean target because removing the input would have dirtied the input
// node and transitively dirtied the target.
debug_assert!(match nodes[source].kind {
DepKind::Hir | DepKind::HirBody | DepKind::MetaData => {
DepKind::Hir | DepKind::HirBody | DepKind::CrateMetadata => {
does_still_exist(tcx, &nodes[source])
}
_ => true,
......@@ -433,3 +426,38 @@ fn process_edge<'a, 'tcx, 'edges>(
}
}
}
/// Loads the new-style (red/green) dep-graph from the incr. comp. cache.
/// Returns an empty graph when incremental compilation is disabled, when no
/// cached file exists, or when the commandline-argument hash has changed.
pub fn load_dep_graph_new(sess: &Session) -> PreviousDepGraph {
    use rustc::dep_graph::SerializedDepGraph as SerializedDepGraphNew;

    let empty = PreviousDepGraph::new(SerializedDepGraphNew::new());

    // Not an incremental session: nothing to load.
    if sess.opts.incremental.is_none() {
        return empty
    }

    // No cached file on disk: start from scratch.
    let bytes = match load_data(sess, &dep_graph_path_new(sess)) {
        Some(bytes) => bytes,
        None => return empty,
    };

    let mut decoder = Decoder::new(&bytes, 0);

    // The commandline-argument hash is stored first so we can bail out
    // before decoding the (much larger) graph.
    let prev_commandline_args_hash = u64::decode(&mut decoder)
        .expect("Error reading commandline arg hash from cached dep-graph");

    if prev_commandline_args_hash != sess.opts.dep_tracking_hash() {
        if sess.opts.debugging_opts.incremental_info {
            eprintln!("incremental: completely ignoring cache because of \
                       differing commandline arguments");
        }
        debug!("load_dep_graph_new: differing commandline arg hashes");
        // The cache is unusable with these arguments.
        return empty
    }

    let dep_graph = SerializedDepGraphNew::decode(&mut decoder)
        .expect("Error reading cached dep-graph");

    PreviousDepGraph::new(dep_graph)
}
......@@ -15,7 +15,6 @@
mod data;
mod dirty_clean;
mod fs;
mod hash;
mod load;
mod preds;
mod save;
......@@ -26,6 +25,7 @@
pub use self::fs::finalize_session_directory;
pub use self::fs::in_incr_comp_dir;
pub use self::load::load_dep_graph;
pub use self::load::load_dep_graph_new;
pub use self::save::save_dep_graph;
pub use self::save::save_work_products;
pub use self::work_product::save_trans_partition;
......@@ -10,10 +10,10 @@
use rustc::dep_graph::{DepGraphQuery, DepNode, DepKind};
use rustc::ich::Fingerprint;
use rustc::ty::TyCtxt;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::{Graph, NodeIndex};
use super::hash::*;
mod compress;
......@@ -40,15 +40,13 @@ pub struct Predecessors<'query> {
}
impl<'q> Predecessors<'q> {
pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self {
let tcx = hcx.tcx;
pub fn new(tcx: TyCtxt, query: &'q DepGraphQuery) -> Self {
// Find the set of "start nodes". These are nodes that we will
// possibly query later.
let is_output = |node: &DepNode| -> bool {
match node.kind {
DepKind::WorkProduct => true,
DepKind::MetaData => {
DepKind::CrateMetadata => {
// We do *not* create dep-nodes for the current crate's
// metadata anymore, just for metadata that we import/read
// from other crates.
......@@ -74,7 +72,7 @@ pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self {
let input = *graph.node_data(input_index);
debug!("computing hash for input node `{:?}`", input);
hashes.entry(input)
.or_insert_with(|| hcx.hash(input).unwrap());
.or_insert_with(|| tcx.dep_graph.fingerprint_of(&input));
}
if tcx.sess.opts.debugging_opts.query_dep_graph {
......@@ -89,7 +87,7 @@ pub fn new(query: &'q DepGraphQuery, hcx: &mut HashContext) -> Self {
for node in hir_nodes {
hashes.entry(node)
.or_insert_with(|| hcx.hash(node).unwrap());
.or_insert_with(|| tcx.dep_graph.fingerprint_of(&node));
}
}
......
......@@ -15,6 +15,7 @@
use rustc::middle::cstore::EncodedMetadataHashes;
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc::util::common::time;
use rustc::util::nodemap::DefIdMap;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph;
......@@ -26,7 +27,6 @@
use std::path::PathBuf;
use super::data::*;
use super::hash::*;
use super::preds::*;
use super::fs::*;
use super::dirty_clean;
......@@ -45,13 +45,6 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
return;
}
let query = tcx.dep_graph.query();
if tcx.sess.opts.debugging_opts.incremental_info {
eprintln!("incremental: {} nodes in dep-graph", query.graph.len_nodes());
eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges());
}
// We load the previous metadata hashes now before overwriting the file
// (if we need them for testing).
let prev_metadata_hashes = if tcx.sess.opts.debugging_opts.query_dep_graph {
......@@ -60,8 +53,6 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
DefIdMap()
};
let mut hcx = HashContext::new(tcx);
let preds = Predecessors::new(&query, &mut hcx);
let mut current_metadata_hashes = FxHashMap();
// IMPORTANT: We are saving the metadata hashes *before* the dep-graph,
......@@ -78,10 +69,27 @@ pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
e));
}
save_in(sess,
dep_graph_path(sess),
|e| encode_dep_graph(tcx, &preds, e));
time(sess.time_passes(), "persist dep-graph (old)", || {
let query = tcx.dep_graph.query();
if tcx.sess.opts.debugging_opts.incremental_info {
eprintln!("incremental: {} nodes in dep-graph", query.graph.len_nodes());
eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges());
}
let preds = Predecessors::new(tcx, &query);
save_in(sess,
dep_graph_path(sess),
|e| encode_dep_graph(tcx, &preds, e));
});
time(sess.time_passes(), "persist dep-graph (new)", || {
save_in(sess,
dep_graph_path_new(sess),
|e| encode_dep_graph_new(tcx, e));
});
dirty_clean::check_dirty_clean_annotations(tcx);
dirty_clean::check_dirty_clean_metadata(tcx,
&prev_metadata_hashes,
&current_metadata_hashes);
......@@ -174,6 +182,19 @@ fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
}
}
/// Writes the new-style dep-graph to `encoder`: first the
/// commandline-argument hash, then the serialized graph itself.
fn encode_dep_graph_new(tcx: TyCtxt,
                        encoder: &mut Encoder)
                        -> io::Result<()> {
    // The argument hash goes first so readers can bail out early when the
    // commandline has changed.
    tcx.sess.opts.dep_tracking_hash().encode(encoder)?;

    // Then the graph data.
    tcx.dep_graph.serialize().encode(encoder)?;

    Ok(())
}
pub fn encode_dep_graph(tcx: TyCtxt,
preds: &Predecessors,
encoder: &mut Encoder)
......
......@@ -55,9 +55,14 @@ pub fn provide<$lt>(providers: &mut Providers<$lt>) {
let ($def_id, $other) = def_id_arg.into_args();
assert!(!$def_id.is_local());
let def_path_hash = $tcx.def_path_hash($def_id);
let dep_node = def_path_hash.to_dep_node(::rustc::dep_graph::DepKind::MetaData);
let def_path_hash = $tcx.def_path_hash(DefId {
krate: $def_id.krate,
index: CRATE_DEF_INDEX
});
let dep_node = def_path_hash
.to_dep_node(::rustc::dep_graph::DepKind::CrateMetadata);
// The DepNodeIndex of the DepNode::CrateMetadata should be
// cached somewhere, so that we can use read_index().
$tcx.dep_graph.read(dep_node);
let $cdata = $tcx.crate_data_as_rc_any($def_id.krate);
......@@ -379,6 +384,16 @@ fn crate_name_untracked(&self, cnum: CrateNum) -> Symbol
self.get_crate_data(cnum).name
}
fn crate_disambiguator_untracked(&self, cnum: CrateNum) -> Symbol
{
self.get_crate_data(cnum).disambiguator()
}
fn crate_hash_untracked(&self, cnum: CrateNum) -> hir::svh::Svh
{
self.get_crate_data(cnum).hash()
}
/// Returns the `DefKey` for a given `DefId`. This indicates the
/// parent `DefId` as well as some idea of what kind of data the
/// `DefId` refers to.
......
......@@ -942,8 +942,7 @@ pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let crate_hash = tcx.dep_graph
.fingerprint_of(&DepNode::new_no_params(DepKind::Krate))
.unwrap();
.fingerprint_of(&DepNode::new_no_params(DepKind::Krate));
let link_meta = link::build_link_meta(crate_hash);
let exported_symbol_node_ids = find_exported_symbols(tcx);
......
......@@ -12,6 +12,8 @@
// revisions:rpass1 rpass2
// compile-flags:-Z query-dep-graph
// ignore-test -- ignored until red/green restores cross-crate tracking fidelity
#![feature(rustc_attrs)]
extern crate a;
......
......@@ -15,6 +15,8 @@
// compile-flags: -Z query-dep-graph
// aux-build:point.rs
// ignore-test -- ignored until red/green restores cross-crate tracking fidelity
#![feature(rustc_attrs)]
#![feature(stmt_expr_attributes)]
#![allow(dead_code)]
......
......@@ -15,6 +15,8 @@
// compile-flags: -Z query-dep-graph
// aux-build:point.rs
// ignore-test -- ignored until red/green restores cross-crate tracking fidelity
#![feature(rustc_attrs)]
#![feature(stmt_expr_attributes)]
#![allow(dead_code)]
......
......@@ -37,7 +37,7 @@ mod y {
#[rustc_clean(label="TypeckTables", cfg="cfail2")]
pub fn y() {
//[cfail2]~^ ERROR `TypeckTables(y::y)` not found in dep graph, but should be clean
//[cfail2]~^ ERROR `TypeckTables(y::y)` should be clean but is not
x::x();
}
}
......@@ -45,6 +45,6 @@ pub fn y() {
mod z {
#[rustc_dirty(label="TypeckTables", cfg="cfail2")]
pub fn z() {
//[cfail2]~^ ERROR `TypeckTables(z::z)` found in dep graph, but should be dirty
//[cfail2]~^ ERROR `TypeckTables(z::z)` should be dirty but is not
}
}
......@@ -143,7 +143,7 @@ enum EnumChangeValueCStyleVariant1 {
#[cfg(not(cfail1))]
#[rustc_dirty(label="Hir", cfg="cfail2")]
#[rustc_clean(label="Hir", cfg="cfail3")]
#[rustc_clean(label="HirBody", cfg="cfail2")]
#[rustc_dirty(label="HirBody", cfg="cfail2")]
#[rustc_clean(label="HirBody", cfg="cfail3")]
#[rustc_metadata_dirty(cfg="cfail2")]
#[rustc_metadata_clean(cfg="cfail3")]
......
......@@ -18,6 +18,8 @@
// no-prefer-dynamic
// compile-flags: -Z query-dep-graph
// ignore-test -- ignored until red/green restores cross-crate tracking fidelity
#![feature(rustc_attrs)]
extern crate a;
......
......@@ -27,7 +27,8 @@ pub fn x() {
}
#[cfg(rpass2)]
#[rustc_dirty(label="TypeckTables", cfg="rpass2")]
#[rustc_dirty(label="HirBody", cfg="rpass2")]
#[rustc_dirty(label="MirOptimized", cfg="rpass2")]
pub fn x() {
println!("{}", "2");
}
......@@ -37,6 +38,7 @@ mod y {
use x;
#[rustc_clean(label="TypeckTables", cfg="rpass2")]
#[rustc_clean(label="MirOptimized", cfg="rpass2")]
pub fn y() {
x::x();
}
......@@ -46,6 +48,7 @@ mod z {
use y;
#[rustc_clean(label="TypeckTables", cfg="rpass2")]
#[rustc_clean(label="MirOptimized", cfg="rpass2")]
pub fn z() {
y::y();
}
......
......@@ -12,6 +12,8 @@
// revisions:rpass1 rpass2
// compile-flags: -Z query-dep-graph
// ignore-test -- ignored until red/green restores cross-crate tracking fidelity
#![feature(rustc_attrs)]
extern crate a;
......
......@@ -12,6 +12,8 @@
// revisions:rpass1 rpass2 rpass3
// compile-flags: -Z query-dep-graph
// ignore-test -- ignored until red/green restores cross-crate tracking fidelity
#![feature(rustc_attrs)]
extern crate a;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册