graph.rs 46.0 KB
Newer Older
1
use parking_lot::Mutex;
2
use rustc_data_structures::fingerprint::Fingerprint;
3
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
R
Ryan Levick 已提交
4
use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
M
Mark Rousskov 已提交
5 6
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
C
Camille GILLOT 已提交
7 8 9 10
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
11
use smallvec::{smallvec, SmallVec};
M
Mark Rousskov 已提交
12
use std::collections::hash_map::Entry;
C
Camille GILLOT 已提交
13
use std::fmt::Debug;
14
use std::hash::Hash;
C
Camille GILLOT 已提交
15
use std::marker::PhantomData;
M
Michael Woerister 已提交
16
use std::sync::atomic::Ordering::Relaxed;
17 18

use super::query::DepGraphQuery;
C
Camille GILLOT 已提交
19
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
20
use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
21
use crate::ich::StableHashingContext;
22
use crate::query::{QueryContext, QuerySideEffects};
23

C
Camille GILLOT 已提交
24 25 26
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

27
#[derive(Clone)]
28 29
pub struct DepGraph<K: DepKind> {
    data: Option<Lrc<DepGraphData<K>>>,
30 31 32

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
33
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
34 35
    /// ID is used for self-profiling.
    virtual_dep_node_index: Lrc<AtomicU32>,
R
Ryan Levick 已提交
36 37 38 39

    /// The cached event id for profiling node interning. This saves us
    /// from having to look up the event id every time we intern a node
    /// which may incur too much overhead.
R
Ryan Levick 已提交
40
    /// This will be None if self-profiling is disabled.
R
Ryan Levick 已提交
41
    node_intern_event_id: Option<EventId>,
42 43
}

44
rustc_index::newtype_index! {
    pub struct DepNodeIndex { .. }
}
47 48

impl DepNodeIndex {
49
    pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
50
    pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
51 52
}

53 54 55
impl std::convert::From<DepNodeIndex> for QueryInvocationId {
    #[inline]
    fn from(dep_node_index: DepNodeIndex) -> Self {
M
Michael Woerister 已提交
56
        QueryInvocationId(dep_node_index.as_u32())
57 58 59
    }
}

60
#[derive(PartialEq)]
61 62
pub enum DepNodeColor {
    Red,
M
Mark Rousskov 已提交
63
    Green(DepNodeIndex),
64 65
}

66 67 68 69 70 71 72 73 74
impl DepNodeColor {
    /// Returns `true` when the color is `Green(_)`, `false` for `Red`.
    pub fn is_green(self) -> bool {
        matches!(self, DepNodeColor::Green(_))
    }
}

75
struct DepGraphData<K: DepKind> {
76 77 78
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: We don't merge the previous dep-graph into
79
    /// current one anymore, but we do reference shared data to save space.
80
    current: CurrentDepGraph<K>,
81

82 83
    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
C
Camille GILLOT 已提交
84
    previous: SerializedDepGraph<K>,
85

86
    colors: DepNodeColorMap,
87

88
    processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,
89

A
Alexander Regueiro 已提交
90
    /// When we load, there may be `.o` files, cached MIR, or other such
91 92 93
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
94
    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,
95

96
    dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
97 98
}

C
Camille GILLOT 已提交
99
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
100
where
101
    R: for<'a> HashStable<StableHashingContext<'a>>,
102 103 104
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
C
Camille GILLOT 已提交
105
    stable_hasher.finish()
106 107
}

108
impl<K: DepKind> DepGraph<K> {
M
Mark Rousskov 已提交
109
    pub fn new(
110
        profiler: &SelfProfilerRef,
C
Camille GILLOT 已提交
111
        prev_graph: SerializedDepGraph<K>,
M
Mark Rousskov 已提交
112
        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
C
Camille GILLOT 已提交
113 114 115
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
116
    ) -> DepGraph<K> {
117 118
        let prev_graph_node_count = prev_graph.node_count();

119 120 121
        let current =
            CurrentDepGraph::new(prev_graph_node_count, encoder, record_graph, record_stats);

122
        // Instantiate a dependy-less node only once for anonymous queries.
123 124 125 126 127 128
        let _green_node_index = current.intern_new_node(
            profiler,
            DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
            smallvec![],
            Fingerprint::ZERO,
        );
129
        debug_assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
130

R
Ryan Levick 已提交
131 132 133 134
        let node_intern_event_id = profiler
            .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
            .map(EventId::from_label);

135
        DepGraph {
136
            data: Some(Lrc::new(DepGraphData {
137
                previous_work_products: prev_work_products,
138
                dep_node_debug: Default::default(),
139
                current,
140
                processed_side_effects: Default::default(),
141
                previous: prev_graph,
142
                colors: DepNodeColorMap::new(prev_graph_node_count),
143
            })),
144
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
R
Ryan Levick 已提交
145
            node_intern_event_id,
146 147 148
        }
    }

149
    pub fn new_disabled() -> DepGraph<K> {
R
Ryan Levick 已提交
150 151 152 153 154
        DepGraph {
            data: None,
            virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
            node_intern_event_id: None,
        }
155 156
    }

A
Alexander Regueiro 已提交
157
    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
158 159
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
160
        self.data.is_some()
161 162
    }

C
Camille GILLOT 已提交
163 164 165
    pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
        if let Some(data) = &self.data {
            data.current.encoder.borrow().with_query(f)
166
        }
167 168
    }

M
Mark Rousskov 已提交
169
    pub fn assert_ignored(&self) {
J
John Kåre Alsaker 已提交
170
        if let Some(..) = self.data {
171 172 173
            K::read_deps(|task_deps| {
                assert!(task_deps.is_none(), "expected no task dependency tracking");
            })
174
        }
175 176
    }

M
Mark Rousskov 已提交
177 178 179
    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
180
    {
181
        K::with_deps(None, op)
182 183
    }

184 185 186 187 188 189
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
190
    /// dep-graph -- see the [rustc dev guide] for more details on
M
Malo Jaffré 已提交
191
    /// the dep-graph). To this end, the task function gets exactly two
192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
210
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
C
Camille GILLOT 已提交
211
    pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
212
        &self,
213
        key: DepNode<K>,
214
        cx: Ctxt,
215
        arg: A,
216
        task: fn(Ctxt, A) -> R,
C
Camille GILLOT 已提交
217
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
218
    ) -> (R, DepNodeIndex) {
C
Camille GILLOT 已提交
219 220 221 222 223 224 225 226 227
        if self.is_fully_enabled() {
            self.with_task_impl(key, cx, arg, task, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            (task(cx, arg), self.next_virtual_depnode_index())
        }
J
John Kåre Alsaker 已提交
228 229
    }

C
Camille GILLOT 已提交
230
    fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
J
John Kåre Alsaker 已提交
231
        &self,
232
        key: DepNode<K>,
233
        cx: Ctxt,
J
John Kåre Alsaker 已提交
234
        arg: A,
235
        task: fn(Ctxt, A) -> R,
C
Camille GILLOT 已提交
236
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
237
    ) -> (R, DepNodeIndex) {
C
Camille GILLOT 已提交
238 239 240 241 242 243 244 245 246 247 248
        // This function is only called when the graph is enabled.
        let data = self.data.as_ref().unwrap();

        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        assert!(
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\
C
Camille GILLOT 已提交
249 250
                 - query-key: {:?}\n\
                 - dep-node: {:?}",
C
Camille GILLOT 已提交
251 252 253
            arg,
            key
        );
C
Camille GILLOT 已提交
254

C
Camille GILLOT 已提交
255
        let task_deps = if cx.dep_context().is_eval_always(key.kind) {
C
Camille GILLOT 已提交
256 257 258 259 260 261 262 263 264 265 266 267 268 269 270
            None
        } else {
            Some(Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: SmallVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            }))
        };
        let result = K::with_deps(task_deps.as_ref(), || task(cx, arg));
        let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);

        let dcx = cx.dep_context();
        let hashing_timer = dcx.profiler().incr_result_hashing();
C
Camille GILLOT 已提交
271 272 273 274
        let current_fingerprint = hash_result.map(|f| {
            let mut hcx = dcx.create_stable_hashing_context();
            f(&mut hcx, &result)
        });
C
Camille GILLOT 已提交
275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290

        let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks;

        // Get timer for profiling `DepNode` interning
        let node_intern_timer =
            self.node_intern_event_id.map(|eid| dcx.profiler().generic_activity_with_event_id(eid));
        // Intern the new `DepNode`.
        let (dep_node_index, prev_and_color) = data.current.intern_node(
            dcx.profiler(),
            &data.previous,
            key,
            edges,
            current_fingerprint,
            print_status,
        );
        drop(node_intern_timer);
R
Ryan Levick 已提交
291

C
Camille GILLOT 已提交
292
        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
293

C
Camille GILLOT 已提交
294 295 296 297
        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                data.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor \
M
Mark Rousskov 已提交
298
                            insertion for {:?}",
C
Camille GILLOT 已提交
299 300
                key
            );
301

C
Camille GILLOT 已提交
302
            data.colors.insert(prev_index, color);
303
        }
C
Camille GILLOT 已提交
304 305

        (result, dep_node_index)
306 307
    }

A
Alexander Regueiro 已提交
308 309
    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
C
Camille GILLOT 已提交
310 311 312 313 314 315
    pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
        &self,
        cx: Ctxt,
        dep_kind: K,
        op: OP,
    ) -> (R, DepNodeIndex)
M
Mark Rousskov 已提交
316 317
    where
        OP: FnOnce() -> R,
318
    {
C
Camille GILLOT 已提交
319
        debug_assert!(!cx.is_eval_always(dep_kind));
320

321
        if let Some(ref data) = self.data {
322 323 324
            let task_deps = Lock::new(TaskDeps::default());
            let result = K::with_deps(Some(&task_deps), op);
            let task_deps = task_deps.into_inner();
325 326 327 328
            let task_deps = task_deps.reads;

            let dep_node_index = match task_deps.len() {
                0 => {
329 330 331 332 333 334
                    // Because the dep-node id of anon nodes is computed from the sets of its
                    // dependencies we already know what the ID of this dependency-less node is
                    // going to be (i.e. equal to the precomputed
                    // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                    // a `StableHasher` and sending the node through interning.
                    DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363
                }
                1 => {
                    // When there is only one dependency, don't bother creating a node.
                    task_deps[0]
                }
                _ => {
                    // The dep node indices are hashed here instead of hashing the dep nodes of the
                    // dependencies. These indices may refer to different nodes per session, but this isn't
                    // a problem here because we that ensure the final dep node hash is per session only by
                    // combining it with the per session random number `anon_id_seed`. This hash only need
                    // to map the dependencies to a single value on a per session basis.
                    let mut hasher = StableHasher::new();
                    task_deps.hash(&mut hasher);

                    let target_dep_node = DepNode {
                        kind: dep_kind,
                        // Fingerprint::combine() is faster than sending Fingerprint
                        // through the StableHasher (at least as long as StableHasher
                        // is so slow).
                        hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
                    };

                    data.current.intern_new_node(
                        cx.profiler(),
                        target_dep_node,
                        task_deps,
                        Fingerprint::ZERO,
                    )
                }
364 365
            };

366
            (result, dep_node_index)
367
        } else {
368
            (op(), self.next_virtual_depnode_index())
369 370 371
        }
    }

372
    #[inline]
373
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
374
        if let Some(ref data) = self.data {
375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401
            K::read_deps(|task_deps| {
                if let Some(task_deps) = task_deps {
                    let mut task_deps = task_deps.lock();
                    let task_deps = &mut *task_deps;
                    if cfg!(debug_assertions) {
                        data.current.total_read_count.fetch_add(1, Relaxed);
                    }

                    // As long as we only have a low number of reads we can avoid doing a hash
                    // insert and potentially allocating/reallocating the hashmap
                    let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
                        task_deps.reads.iter().all(|other| *other != dep_node_index)
                    } else {
                        task_deps.read_set.insert(dep_node_index)
                    };
                    if new_read {
                        task_deps.reads.push(dep_node_index);
                        if task_deps.reads.len() == TASK_DEPS_READS_CAP {
                            // Fill `read_set` with what we have so far so we can use the hashset
                            // next time
                            task_deps.read_set.extend(task_deps.reads.iter().copied());
                        }

                        #[cfg(debug_assertions)]
                        {
                            if let Some(target) = task_deps.node {
                                if let Some(ref forbidden_edge) = data.current.forbidden_edge {
C
Camille GILLOT 已提交
402
                                    let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
403 404 405 406 407 408 409 410 411 412 413
                                    if forbidden_edge.test(&src, &target) {
                                        panic!("forbidden edge {:?} -> {:?} created", src, target)
                                    }
                                }
                            }
                        }
                    } else if cfg!(debug_assertions) {
                        data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
                    }
                }
            })
414
        }
415 416
    }

417
    #[inline]
418 419
    pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
        self.dep_node_index_of_opt(dep_node).unwrap()
420 421
    }

422
    #[inline]
423 424 425 426 427 428 429 430 431
    pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
        let data = self.data.as_ref().unwrap();
        let current = &data.current;

        if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
            current.prev_index_to_index.lock()[prev_index]
        } else {
            current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
        }
432 433
    }

434
    #[inline]
435
    pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
436 437 438
        self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
    }

439
    /// Returns the fingerprint `dep_node` had in the previous session, if it
    /// existed then. Panics if the dep-graph is disabled.
    pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
        self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
    }

A
Alexander Regueiro 已提交
443
    /// Checks whether a previous work product exists for `v` and, if
444
    /// so, return the path that leads to it. Used to skip doing work.
445
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
M
Mark Rousskov 已提交
446
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
447 448
    }

449 450
    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
451 452
    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
        &self.data.as_ref().unwrap().previous_work_products
453
    }
454 455

    #[inline(always)]
456
    pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
M
Mark Rousskov 已提交
457 458
    where
        F: FnOnce() -> String,
459
    {
460
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
461

462
        if dep_node_debug.borrow().contains_key(&dep_node) {
M
Mark Rousskov 已提交
463
            return;
464 465 466
        }
        let debug_str = debug_str_gen();
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
467 468
    }

469
    /// Returns the debug description previously registered for `dep_node`,
    /// or `None` if the graph is disabled or nothing was registered.
    pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }
472

C
Camille GILLOT 已提交
473
    fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
474 475
        if let Some(ref data) = self.data {
            if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
M
Mark Rousskov 已提交
476
                return data.colors.get(prev_index);
477
            } else {
C
Camille GILLOT 已提交
478 479
                // This is a node that did not exist in the previous compilation session.
                return None;
480 481 482 483
            }
        }

        None
484 485
    }

C
Camille GILLOT 已提交
486 487
    /// Try to mark a node index for the node dep_node.
    ///
488 489 490
    /// A node will have an index, when it's already been marked green, or when we can mark it
    /// green. This function will mark the current task as a reader of the specified node, when
    /// a node index can be found for that node.
491
    pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
492
        &self,
493 494
        tcx: Ctxt,
        dep_node: &DepNode<K>,
495
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
C
Camille GILLOT 已提交
496
        debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
497

498 499 500 501 502 503 504 505 506
        // Return None if the dep graph is disabled
        let data = self.data.as_ref()?;

        // Return None if the dep node didn't exist in the previous session
        let prev_index = data.previous.node_to_index_opt(dep_node)?;

        match data.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
507
            None => {
J
John Kåre Alsaker 已提交
508 509 510 511
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
M
Mark Rousskov 已提交
512 513
                self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
                    .map(|dep_node_index| (prev_index, dep_node_index))
514
            }
515 516
        }
    }
517

518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555
    /// Ensures a single dependency (`parent_dep_node_index`) of `dep_node` is
    /// green: returns `Some(())` if it is (or could be marked/forced) green,
    /// `None` if it is red or could not be forced.
    fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
        &self,
        tcx: Ctxt,
        data: &DepGraphData<K>,
        parent_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode<K>,
    ) -> Option<()> {
        let dep_dep_node_color = data.colors.get(parent_dep_node_index);
        let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!(
                    "try_mark_previous_green({:?}) --- found dependency {:?} to \
                            be immediately green",
                    dep_node, dep_dep_node,
                );
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!(
                    "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red",
                    dep_node, dep_dep_node,
                );
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !tcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
                                 is unknown, trying to mark it green",
                dep_node, dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node);
            if node_index.is_some() {
                debug!(
                    "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green",
                    dep_node, dep_dep_node
                );
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!(
            "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
            dep_node, dep_dep_node
        );
        if !tcx.try_force_from_dep_node(dep_dep_node) {
            // The DepNode could not be forced.
            debug!(
                "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
                dep_node, dep_dep_node
            );
            return None;
        }

        // Forcing ran the query, which should have colored the node; re-read.
        let dep_dep_node_color = data.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!(
                    "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green",
                    dep_node, dep_dep_node
                );
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!(
                    "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing",
                    dep_node, dep_dep_node
                );
                return None;
            }
            None => {}
        }

        if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!(
            "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error",
            dep_node, dep_dep_node
        );
        return None;
    }

A
Alexander Regueiro 已提交
629
    /// Try to mark a dep-node which existed in the previous compilation session as green.
630
    fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
631
        &self,
632 633
        tcx: Ctxt,
        data: &DepGraphData<K>,
634
        prev_dep_node_index: SerializedDepNodeIndex,
635
        dep_node: &DepNode<K>,
636 637 638
    ) -> Option<DepNodeIndex> {
        debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);

639
        #[cfg(not(parallel_compiler))]
640
        {
641
            debug_assert!(!self.dep_node_exists(dep_node));
642 643 644
            debug_assert!(data.colors.get(prev_dep_node_index).is_none());
        }

645
        // We never try to mark eval_always nodes as green
C
Camille GILLOT 已提交
646
        debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
647

648
        debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
649 650

        let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
651

652
        for &dep_dep_node_index in prev_deps {
653
            self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)?
654 655 656 657
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
J
John Kåre Alsaker 已提交
658
        // can also mark this DepNode as green.
659

J
John Kåre Alsaker 已提交
660 661
        // There may be multiple threads trying to mark the same dep node green concurrently

C
Camille GILLOT 已提交
662 663
        // We allocating an entry for the node in the current dependency graph and
        // adding all the appropriate edges imported from the previous graph
C
Camille GILLOT 已提交
664 665 666 667 668
        let dep_node_index = data.current.promote_node_and_deps_to_current(
            tcx.dep_context().profiler(),
            &data.previous,
            prev_dep_node_index,
        );
669

670
        // ... emitting any stored diagnostic ...
J
John Kåre Alsaker 已提交
671

672 673
        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
674
        let side_effects = tcx.load_side_effects(prev_dep_node_index);
675 676

        #[cfg(not(parallel_compiler))]
M
Mark Rousskov 已提交
677 678 679 680 681 682
        debug_assert!(
            data.colors.get(prev_dep_node_index).is_none(),
            "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
                      insertion for {:?}",
            dep_node
        );
683

684 685
        if unlikely!(!side_effects.is_empty()) {
            self.emit_side_effects(tcx, data, dep_node_index, side_effects);
686 687
        }

688
        // ... and finally storing a "Green" entry in the color map.
J
John Kåre Alsaker 已提交
689
        // Multiple threads can all write the same color here
690
        data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
691

692
        debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
693
        Some(dep_node_index)
694 695
    }

696 697
    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
698 699
    #[cold]
    #[inline(never)]
700
    fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>(
701
        &self,
702 703
        tcx: Ctxt,
        data: &DepGraphData<K>,
704
        dep_node_index: DepNodeIndex,
705
        side_effects: QuerySideEffects,
706
    ) {
707
        let mut processed = data.processed_side_effects.lock();
708

709
        if processed.insert(dep_node_index) {
710
            // We were the first to insert the node in the set so this thread
711
            // must process side effects
712 713

            // Promote the previous diagnostics to the current session.
714
            tcx.store_side_effects(dep_node_index, side_effects.clone());
715

716
            let handle = tcx.dep_context().sess().diagnostic();
717

718
            for diagnostic in side_effects.diagnostics {
719
                handle.emit_diagnostic(&diagnostic);
720 721 722 723
            }
        }
    }

C
Camille GILLOT 已提交
724
    // Returns true if the given node has been marked as red during the
C
Camille GILLOT 已提交
725 726 727 728 729
    // current compilation session. Used in various assertions
    pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
        self.node_color(dep_node) == Some(DepNodeColor::Red)
    }

730 731
    // Returns true if the given node has been marked as green during the
    // current compilation session. Used in various assertions
732
    pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
733
        self.node_color(dep_node).map_or(false, |c| c.is_green())
734
    }
735

736 737 738 739 740 741 742 743
    // This method loads all on-disk cacheable query results into memory, so
    // they can be written out to the new cache file again. Most query results
    // will already be in memory but in the case where we marked something as
    // green but then did not need the value, that value will never have been
    // loaded from disk.
    //
    // This method will only load queries that will end up in the disk cache.
    // Other queries will not be executed.
744 745
    pub fn exec_cache_promotions<Ctxt: QueryContext<DepKind = K>>(&self, qcx: Ctxt) {
        let tcx = qcx.dep_context();
746
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
747

J
John Kåre Alsaker 已提交
748 749 750 751
        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
752
                    let dep_node = data.previous.index_to_node(prev_index);
753
                    qcx.try_load_from_on_disk_cache(&dep_node);
754
                }
M
Mark Rousskov 已提交
755
                None | Some(DepNodeColor::Red) => {
J
John Kåre Alsaker 已提交
756 757 758 759 760
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
761 762
        }
    }
763

764
    pub fn print_incremental_info(&self) {
C
Camille GILLOT 已提交
765 766 767 768 769
        if let Some(data) = &self.data {
            data.current.encoder.borrow().print_incremental_info(
                data.current.total_read_count.load(Relaxed),
                data.current.total_duplicate_read_count.load(Relaxed),
            )
770
        }
C
Camille GILLOT 已提交
771
    }
772

773 774 775 776 777 778
    pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
        if let Some(data) = &self.data {
            data.current.encoder.steal().finish(profiler)
        } else {
            Ok(())
        }
779 780
    }

C
Camille GILLOT 已提交
781
    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
782 783 784
        let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
        DepNodeIndex::from_u32(index)
    }
785 786 787 788 789 790 791 792 793 794
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary example are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
N
Niko Matsakis 已提交
795
/// at load time. If the work-product is found to be clean, then we
796 797 798 799 800 801 802 803 804 805 806
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
I
Irina Popa 已提交
807
/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
808 809
/// in P.
///
I
Irina Popa 已提交
810
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
811 812 813 814 815 816 817
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
M
Matthew Jasper 已提交
818
#[derive(Clone, Debug, Encodable, Decodable)]
819
pub struct WorkProduct {
820
    pub cgu_name: String,
821 822
    /// Saved file associated with this CGU.
    pub saved_file: Option<String>,
823
}
824

825 826 827 828 829
// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex { .. }
}

830 831 832
/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory.  This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<K: DepKind> {
    /// Streams interned nodes/edges out to the serialized dep-graph file.
    encoder: Steal<GraphEncoder<K>>,
    /// Maps nodes that did NOT exist in the previous graph to their index.
    new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
    /// Maps previous-graph indices to current-graph indices (filled lazily).
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter<K>>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

882
impl<K: DepKind> CurrentDepGraph<K> {
C
Camille GILLOT 已提交
883 884 885 886 887 888
    /// Creates the current-session graph, wiring the on-disk `encoder` and
    /// pre-sizing the node maps from the previous graph's node count.
    fn new(
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> CurrentDepGraph<K> {
        use std::time::{SystemTime, UNIX_EPOCH};

        // Derive `anon_id_seed` from the wall clock so that anonymous node IDs
        // from different sessions don't overlap (see `anon_id_seed` docs).
        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);

        // Debug builds can trap a specific edge via this environment variable.
        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // We store a large collection of these in `prev_index_to_index` during
        // non-full incremental builds, and want to ensure that the element size
        // doesn't inadvertently increase.
        static_assert_size!(Option<DepNodeIndex>, 4);

        // Assume ~2% growth over the previous session, plus constant headroom.
        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        CurrentDepGraph {
            encoder: Steal::new(GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
            )),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::SHARDS,
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed: stable_hasher.finish(),
            #[cfg(debug_assertions)]
            forbidden_edge,
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

C
Camille GILLOT 已提交
934 935 936 937 938 939 940 941 942
    /// Debug builds only: records which `DepNode` a `DepNodeIndex` refers to —
    /// presumably consulted by the `RUST_FORBID_DEP_GRAPH_EDGE` edge filter
    /// when naming trapped edges (see `EdgeFilter`); no-op if no filter is set.
    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
943
    fn intern_new_node(
944
        &self,
C
Camille GILLOT 已提交
945
        profiler: &SelfProfilerRef,
C
Camille GILLOT 已提交
946
        key: DepNode<K>,
947
        edges: EdgesVec,
C
Camille GILLOT 已提交
948
        current_fingerprint: Fingerprint,
949
    ) -> DepNodeIndex {
C
Camille GILLOT 已提交
950
        match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
951 952
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
C
Camille GILLOT 已提交
953 954
                let dep_node_index =
                    self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
955
                entry.insert(dep_node_index);
C
Camille GILLOT 已提交
956
                #[cfg(debug_assertions)]
C
Camille GILLOT 已提交
957
                self.record_edge(dep_node_index, key);
958 959 960
                dep_node_index
            }
        }
961 962
    }

C
Camille GILLOT 已提交
963
    fn intern_node(
964
        &self,
C
Camille GILLOT 已提交
965
        profiler: &SelfProfilerRef,
C
Camille GILLOT 已提交
966
        prev_graph: &SerializedDepGraph<K>,
C
Camille GILLOT 已提交
967
        key: DepNode<K>,
968
        edges: EdgesVec,
C
Camille GILLOT 已提交
969
        fingerprint: Option<Fingerprint>,
C
Camille GILLOT 已提交
970 971 972 973 974 975
        print_status: bool,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        let print_status = cfg!(debug_assertions) && print_status;

        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            // Determine the color and index of the new `DepNode`.
C
Camille GILLOT 已提交
976 977
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
C
Camille GILLOT 已提交
978 979 980
                    if print_status {
                        eprintln!("[task::green] {:?}", key);
                    }
981

C
Camille GILLOT 已提交
982
                    // This is a green node: it existed in the previous compilation,
C
Camille GILLOT 已提交
983 984 985 986 987 988 989
                    // its query was re-executed, and it has the same result as before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
C
Camille GILLOT 已提交
990
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
C
Camille GILLOT 已提交
991 992 993 994
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };
995

C
Camille GILLOT 已提交
996
                    #[cfg(debug_assertions)]
C
Camille GILLOT 已提交
997
                    self.record_edge(dep_node_index, key);
C
Camille GILLOT 已提交
998 999 1000 1001 1002
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    if print_status {
                        eprintln!("[task::red] {:?}", key);
                    }
1003

C
Camille GILLOT 已提交
1004 1005 1006 1007 1008 1009 1010 1011
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let mut prev_index_to_index = self.prev_index_to_index.lock();

                    let dep_node_index = match prev_index_to_index[prev_index] {
                        Some(dep_node_index) => dep_node_index,
                        None => {
                            let dep_node_index =
C
Camille GILLOT 已提交
1012
                                self.encoder.borrow().send(profiler, key, fingerprint, edges);
C
Camille GILLOT 已提交
1013 1014 1015 1016
                            prev_index_to_index[prev_index] = Some(dep_node_index);
                            dep_node_index
                        }
                    };
1017

C
Camille GILLOT 已提交
1018
                    #[cfg(debug_assertions)]
C
Camille GILLOT 已提交
1019
                    self.record_edge(dep_node_index, key);
C
Camille GILLOT 已提交
1020 1021 1022 1023 1024 1025
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                if print_status {
                    eprintln!("[task::unknown] {:?}", key);
                }
1026

C
Camille GILLOT 已提交
1027 1028 1029 1030 1031 1032 1033 1034 1035 1036
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index =
C
Camille GILLOT 已提交
1037
                            self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
C
Camille GILLOT 已提交
1038 1039 1040 1041 1042 1043
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
C
Camille GILLOT 已提交
1044
                self.record_edge(dep_node_index, key);
C
Camille GILLOT 已提交
1045
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
1046
            }
C
Camille GILLOT 已提交
1047 1048 1049 1050 1051
        } else {
            if print_status {
                eprintln!("[task::new] {:?}", key);
            }

C
Camille GILLOT 已提交
1052
            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);
C
Camille GILLOT 已提交
1053 1054

            // This is a new node: it didn't exist in the previous compilation session.
C
Camille GILLOT 已提交
1055
            let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);
C
Camille GILLOT 已提交
1056 1057

            (dep_node_index, None)
1058 1059 1060
        }
    }

C
Camille GILLOT 已提交
1061
    /// Allocates a current-graph entry for `prev_index`, importing its
    /// fingerprint and edges from the previous graph. Idempotent: if the node
    /// was already promoted this session, the existing index is returned.
    fn promote_node_and_deps_to_current(
        &self,
        profiler: &SelfProfilerRef,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let key = prev_graph.index_to_node(prev_index);
                let dep_node_index = self.encoder.borrow().send(
                    profiler,
                    key,
                    prev_graph.fingerprint_by_index(prev_index),
                    prev_graph
                        .edge_targets_from(prev_index)
                        .iter()
                        // All of this node's dependencies must already have
                        // current-graph indices, otherwise this `unwrap` fails.
                        .map(|i| prev_index_to_index[*i].unwrap())
                        .collect(),
                );
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key);
                dep_node_index
            }
        }
    }

    /// Debug-only sanity check: a node inherited from the previous graph must
    /// never also have been interned via `new_node_to_index` (i.e. as a node
    /// that supposedly did not exist in the previous session).
    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph<K>,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
J
John Kåre Alsaker 已提交
1105 1106
}

1107
/// The capacity of the `reads` field `SmallVec`
const TASK_DEPS_READS_CAP: usize = 8;
/// Edge list of a single task: stored inline up to `TASK_DEPS_READS_CAP`
/// entries, spilling to the heap beyond that.
type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
1110 1111

// The dependencies recorded while executing a single task.
pub struct TaskDeps<K> {
    // The node the reads are being collected for; debug builds only.
    #[cfg(debug_assertions)]
    node: Option<DepNode<K>>,
    // The dep-node indices read by the task.
    reads: EdgesVec,
    // NOTE(review): presumably mirrors `reads` for fast duplicate detection —
    // the populating code is elsewhere in this file; confirm there.
    read_set: FxHashSet<DepNodeIndex>,
    // Ties the `K` parameter to the struct even when the debug-only `node`
    // field is compiled out.
    phantom_data: PhantomData<DepNode<K>>,
}

impl<K> Default for TaskDeps<K> {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
C
Camille GILLOT 已提交
1126
            phantom_data: PhantomData,
1127 1128
        }
    }
J
John Kåre Alsaker 已提交
1129 1130
}

1131 1132 1133
// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

// Encoding used by `DepNodeColorMap`: 0 = no color stored, 1 = red, and
// `2 + n` = green with current-graph index `n` (see `get`/`insert` below).
const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
M
Mark Rousskov 已提交
1143
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
1144 1145
    }

1146
    #[inline]
1147
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
1148
        match self.values[index].load(Ordering::Acquire) {
1149 1150
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
M
Mark Rousskov 已提交
1151 1152 1153
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
1154 1155 1156
        }
    }

1157
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
M
Mark Rousskov 已提交
1158 1159 1160 1161 1162 1163 1164
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
1165 1166
    }
}