Unverified commit e87464c0, authored by Heejae Chang, committed by GitHub

fixed a race where solution crawler used wrong solution for it to process (#31291)

* fixed a race where the solution crawler used the wrong solution for it to process.

The race is this:

1. the solution crawler picked up a solution
2. before processing that solution, a work item got changed
3. the crawler then picked up the work item from the queue
4. and used the work item with the solution picked up in step 1

Step 2 happens because the solution has changed, but step 4 used the old solution from step 1,
which does not reflect those solution changes.

The solution crawler must pick up the solution only after it has picked up the work item from the queue, so that any further solution changes get enqueued as new work items. The corrected ordering is sketched below.
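A minimal, self-contained sketch of that ordering (not the actual WorkCoordinator code): the work-item queue is modeled as a `Channel<int>` and the solution as a version number; `OnSolutionChanged` and `ProcessLoopAsync` are hypothetical stand-ins for the crawler's internals.

```csharp
using System;
using System.Threading.Channels;
using System.Threading.Tasks;

class CrawlerOrderingSketch
{
    private readonly Channel<int> _workItemQueue = Channel.CreateUnbounded<int>();
    private int _currentSolutionVersion;

    // every solution change bumps the "current solution" and enqueues a new work item
    public void OnSolutionChanged()
    {
        var newVersion = ++_currentSolutionVersion;
        _workItemQueue.Writer.TryWrite(newVersion);
    }

    public async Task ProcessLoopAsync()
    {
        while (await _workItemQueue.Reader.WaitToReadAsync())
        {
            // fixed ordering: remove the work item from the queue first ...
            var workItem = await _workItemQueue.Reader.ReadAsync();

            // ... and only then capture the solution snapshot. If the solution changes
            // after this point, OnSolutionChanged enqueues another work item, so a later
            // iteration catches up with the latest snapshot.
            var solution = _currentSolutionVersion;

            Console.WriteLine($"work item {workItem} processed against solution {solution}");
        }
    }
}
```

Capturing the solution before the dequeue (the pre-fix ordering) would let a work item enqueued for a later change be processed against the stale snapshot captured earlier.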

* Update src/Features/Core/Portable/SolutionCrawler/WorkCoordinator.NormalPriorityProcessor.cs

thank you
Co-Authored-By: heejaechang <hechang@microsoft.com>

* addressing feedback
Parent f50e6b31
@@ -13,7 +13,6 @@
using Microsoft.CodeAnalysis.Remote;
using Microsoft.CodeAnalysis.Shared.Extensions;
using Microsoft.CodeAnalysis.Shared.TestHooks;
using Microsoft.CodeAnalysis.Versions;
using Roslyn.Utilities;
namespace Microsoft.CodeAnalysis.SolutionCrawler
@@ -32,9 +31,13 @@ private sealed class NormalPriorityProcessor : AbstractPriorityProcessor
private readonly ConcurrentDictionary<DocumentId, IDisposable> _higherPriorityDocumentsNotProcessed;
private ProjectId _currentProjectProcessing;
private Solution _processingSolution;
private IDisposable _projectCache;
// this is only used in ResetStatesAsync to find out whether the solution has changed
// and to reset some state, such as logging some telemetry or
// re-prioritizing active, visible, opened files, etc.
private Solution _lastSolution = null;
// whether this processor is running or not
private Task _running;
@@ -52,7 +55,6 @@ private sealed class NormalPriorityProcessor : AbstractPriorityProcessor
_higherPriorityDocumentsNotProcessed = new ConcurrentDictionary<DocumentId, IDisposable>(concurrencyLevel: 2, capacity: 20);
_currentProjectProcessing = default;
_processingSolution = null;
Start();
}
@@ -309,12 +311,31 @@ private async Task ProcessDocumentAsync(ImmutableArray<IIncrementalAnalyzer> ana
var processedEverything = false;
var documentId = workItem.DocumentId;
// we should always use the solution snapshot after the work item is removed from the queue.
// otherwise, we can have a race like the one below:
//
// 1. the solution crawler picked up a solution
// 2. before processing that solution, a work item got changed
// 3. the work item then got picked up from the queue
// 4. and was used with the solution picked up in step 1
//
// step 2 happens because the solution has changed, but step 4 used the old solution from step 1,
// which doesn't reflect those solution changes.
//
// the solution crawler must remove the work item from the queue first and then pick up the solution,
// so that the queue gets a new work item if there is any solution change after the work item is removed
// from the queue.
//
// using a later version of the solution is always fine since, as long as there is a new work item in the queue,
// the solution crawler will eventually process the last work item with the latest solution,
// making everything catch up.
var solution = this.Processor.CurrentSolution;
try
{
using (Logger.LogBlock(FunctionId.WorkCoordinator_ProcessDocumentAsync, w => w.ToString(), workItem, source.Token))
{
var cancellationToken = source.Token;
var document = _processingSolution.GetDocument(documentId);
var document = solution.GetDocument(documentId);
if (document != null)
{
@@ -441,40 +462,16 @@ private static void RemoveDocument(ImmutableArray<IIncrementalAnalyzer> analyzer
}
}
private void ResetLogAggregatorIfNeeded(Solution currentSolution)
{
if (currentSolution == null || _processingSolution == null ||
currentSolution.Id == _processingSolution.Id)
{
return;
}
SolutionCrawlerLogger.LogIncrementalAnalyzerProcessorStatistics(
this.Processor._registration.CorrelationId, _processingSolution, this.Processor._logAggregator, this.Analyzers);
this.Processor.ResetLogAggregator();
}
private async Task ResetStatesAsync()
{
try
{
var currentSolution = this.Processor.CurrentSolution;
if (currentSolution == _processingSolution)
if (!IsSolutionChanged())
{
return;
}
// solution has changed
ResetLogAggregatorIfNeeded(currentSolution);
_processingSolution = currentSolution;
// synchronize new solution to OOP
await currentSolution.Workspace.SynchronizePrimaryWorkspaceAsync(currentSolution, this.CancellationToken).ConfigureAwait(false);
await RunAnalyzersAsync(this.Analyzers, currentSolution, (a, s, c) => a.NewSolutionSnapshotAsync(s, c), this.CancellationToken).ConfigureAwait(false);
await RunAnalyzersAsync(this.Analyzers, this.Processor.CurrentSolution, (a, s, c) => a.NewSolutionSnapshotAsync(s, c), this.CancellationToken).ConfigureAwait(false);
foreach (var id in this.Processor.GetOpenDocumentIds())
{
@@ -487,13 +484,50 @@ private async Task ResetStatesAsync()
{
throw ExceptionUtilities.Unreachable;
}
bool IsSolutionChanged()
{
var currentSolution = this.Processor.CurrentSolution;
var oldSolution = _lastSolution;
if (currentSolution == oldSolution)
{
return false;
}
_lastSolution = currentSolution;
ResetLogAggregatorIfNeeded(currentSolution, oldSolution);
return true;
}
void ResetLogAggregatorIfNeeded(Solution currentSolution, Solution oldSolution)
{
if (currentSolution == null || oldSolution == null ||
currentSolution.Id == oldSolution.Id)
{
// we only log aggregated info when the solution has changed, such as when
// a new solution is opened or the solution is closed
return;
}
// this logs things like how many times we analyzed active files, how many times other files were analyzed,
// the average time to analyze files, how many solution snapshots got analyzed, etc.
// all accumulation is done on the VS side and we only send statistics to VS telemetry; otherwise, it would be
// too much data to send
SolutionCrawlerLogger.LogIncrementalAnalyzerProcessorStatistics(
this.Processor._registration.CorrelationId, oldSolution, this.Processor._logAggregator, this.Analyzers);
this.Processor.ResetLogAggregator();
}
}
public override void Shutdown()
{
base.Shutdown();
SolutionCrawlerLogger.LogIncrementalAnalyzerProcessorStatistics(this.Processor._registration.CorrelationId, _processingSolution, this.Processor._logAggregator, this.Analyzers);
SolutionCrawlerLogger.LogIncrementalAnalyzerProcessorStatistics(this.Processor._registration.CorrelationId, this.Processor.CurrentSolution, this.Processor._logAggregator, this.Analyzers);
_workItemQueue.Dispose();
@@ -508,7 +542,6 @@ internal void WaitUntilCompletion_ForTestingPurposesOnly(ImmutableArray<IIncreme
{
CancellationTokenSource source = new CancellationTokenSource();
_processingSolution = this.Processor.CurrentSolution;
foreach (var item in items)
{
ProcessDocumentAsync(analyzers, item, source).Wait();
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System.Composition;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.CodeAnalysis;
using Microsoft.CodeAnalysis.Remote;
using Microsoft.CodeAnalysis.SolutionCrawler;
namespace Microsoft.VisualStudio.LanguageServices.Remote
{
[ExportIncrementalAnalyzerProvider(nameof(RemoteSolutionPopulatorProvider), workspaceKinds: new[] { WorkspaceKind.Host }), Shared]
internal class RemoteSolutionPopulatorProvider : IIncrementalAnalyzerProvider
{
public IIncrementalAnalyzer CreateIncrementalAnalyzer(Workspace workspace)
{
return new RemoteSolutionPopulator();
}
private class RemoteSolutionPopulator : IncrementalAnalyzerBase
{
public override Task NewSolutionSnapshotAsync(Solution solution, CancellationToken cancellationToken)
{
// this pushes the new solution to the remote host so that the remote host has the new solution cached before
// anyone actually asks for it. this is for performance rather than functionality.
// since a remote host such as Roslyn OOP supports pull mode, any missing data will be automatically pulled to
// the remote host when requested if it is not already available in OOP. but with this, most likely, when
// a feature requests the solution, the solution already exists in OOP because it was pre-emptively pushed.
// if it already exists in OOP, the push becomes a no-op.
return solution.Workspace.SynchronizePrimaryWorkspaceAsync(solution, cancellationToken);
}
}
}
}
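For context on the comment in the new analyzer above, here is a minimal, self-contained sketch (not Roslyn's OOP synchronization code; the cache, version key, and `fetchFromHost` callback are illustrative assumptions) of the push-before-pull idea: the remote side can always pull a missing snapshot on demand, but eagerly pushing each new snapshot means a later request is usually a cache hit, and the push itself is a no-op when the data is already there.

```csharp
using System;
using System.Collections.Concurrent;
using System.Threading.Tasks;

class RemoteSnapshotCacheSketch
{
    // models the remote host's cache of solution snapshots, keyed by a version number
    private readonly ConcurrentDictionary<int, string> _cache = new ConcurrentDictionary<int, string>();

    // eager push on every new snapshot (conceptually what the populator's
    // NewSolutionSnapshotAsync triggers); a no-op if the snapshot is already cached
    public Task PushAsync(int version, string snapshot)
    {
        _cache.TryAdd(version, snapshot);
        return Task.CompletedTask;
    }

    // pull mode: a feature request usually finds the snapshot already cached thanks to
    // the eager push; only on a miss does it fall back to fetching from the host
    public async Task<string> GetAsync(int version, Func<Task<string>> fetchFromHost)
    {
        if (_cache.TryGetValue(version, out var cached))
        {
            return cached;
        }

        var fetched = await fetchFromHost();
        _cache.TryAdd(version, fetched);
        return fetched;
    }
}
```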