Commit c751c0cd authored by Rob Lourens, committed by GitHub

Merge pull request #16286 from roblourens/roblou/parallelSearch

Parallelize text search
@@ -5,73 +5,67 @@
'use strict';
import * as strings from 'vs/base/common/strings';
import uri from 'vs/base/common/uri';
import * as fs from 'fs';
import * as os from 'os';
import * as path from 'path';
import * as baseMime from 'vs/base/common/mime';
import { ILineMatch, IProgress } from 'vs/platform/search/common/search';
import { detectMimeAndEncodingFromBuffer } from 'vs/base/node/mime';
import * as ipc from 'vs/base/parts/ipc/common/ipc';
import { onUnexpectedError } from 'vs/base/common/errors';
import { IProgress } from 'vs/platform/search/common/search';
import { FileWalker } from 'vs/workbench/services/search/node/fileSearch';
import { UTF16le, UTF16be, UTF8, UTF8_with_bom, encodingExists, decode } from 'vs/base/node/encoding';
import { ISerializedFileMatch, ISerializedSearchComplete, IRawSearch, ISearchEngine } from './search';
import { ISearchWorkerConfig, ISearchWorker, ISearchWorkerChannel, SearchWorkerChannelClient } from './worker/searchWorkerIpc';
interface ReadLinesOptions {
bufferLength: number;
encoding: string;
}
import { Client } from 'vs/base/parts/ipc/node/ipc.cp';
export class Engine implements ISearchEngine<ISerializedFileMatch> {
private static PROGRESS_FLUSH_CHUNK_SIZE = 50; // optimization: number of files to process before emitting progress event
private rootFolders: string[];
private extraFiles: string[];
private maxResults: number;
private config: IRawSearch;
private walker: FileWalker;
private contentPattern: RegExp;
private isCanceled: boolean;
private isDone: boolean;
private total: number;
private worked: number;
private progressed: number;
private walkerError: Error;
private walkerIsDone: boolean;
private fileEncoding: string;
private limitReached: boolean;
private isCanceled = false;
private isDone = false;
private totalBytes = 0;
private processedBytes = 0;
private progressed = 0;
private walkerIsDone = false;
private limitReached = false;
private numResults = 0;
private nextWorker = 0;
private workers: ISearchWorker[] = [];
private workerClients: Client[] = [];
constructor(config: IRawSearch, walker: FileWalker) {
this.rootFolders = config.rootFolders;
this.extraFiles = config.extraFiles;
this.config = config;
this.walker = walker;
this.contentPattern = strings.createRegExp(config.contentPattern.pattern, config.contentPattern.isRegExp, { matchCase: config.contentPattern.isCaseSensitive, wholeWord: config.contentPattern.isWordMatch, multiline: false, global: true });
this.isCanceled = false;
this.limitReached = false;
this.maxResults = config.maxResults;
this.worked = 0;
this.progressed = 0;
this.total = 0;
this.fileEncoding = encodingExists(config.fileEncoding) ? config.fileEncoding : UTF8;
}
public cancel(): void {
cancel(): void {
this.isCanceled = true;
this.walker.cancel();
}
public search(onResult: (match: ISerializedFileMatch) => void, onProgress: (progress: IProgress) => void, done: (error: Error, complete: ISerializedSearchComplete) => void): void {
let resultCounter = 0;
this.workers.forEach(w => {
w.cancel()
.then(null, onUnexpectedError);
});
}
let progress = () => {
this.progressed++;
if (this.progressed % Engine.PROGRESS_FLUSH_CHUNK_SIZE === 0) {
onProgress({ total: this.total, worked: this.worked }); // buffer progress in chunks to reduce pressure
search(onResult: (match: ISerializedFileMatch) => void, onProgress: (progress: IProgress) => void, done: (error: Error, complete: ISerializedSearchComplete) => void): void {
this.startWorkers();
const progress = () => {
if (++this.progressed % Engine.PROGRESS_FLUSH_CHUNK_SIZE === 0) {
onProgress({ total: this.totalBytes, worked: this.processedBytes }); // buffer progress in chunks to reduce pressure
}
};
let unwind = (processed: number) => {
this.worked += processed;
const unwind = (processed: number) => {
this.processedBytes += processed;
// Emit progress() unless we got canceled or hit the limit
if (processed && !this.isDone && !this.isCanceled && !this.limitReached) {
@@ -79,8 +73,9 @@ export class Engine implements ISearchEngine<ISerializedFileMatch> {
}
// Emit done()
if (this.worked === this.total && this.walkerIsDone && !this.isDone) {
if (!this.isDone && this.processedBytes === this.totalBytes && this.walkerIsDone) {
this.isDone = true;
this.disposeWorkers();
done(this.walkerError, {
limitHit: this.limitReached,
stats: this.walker.getStats()
@@ -88,247 +83,109 @@ export class Engine implements ISearchEngine<ISerializedFileMatch> {
}
};
// Walk over the file system
this.walker.walk(this.rootFolders, this.extraFiles, result => {
const size = result.size || 1;
this.total += size;
const run = (batch: string[], batchBytes: number): void => {
const worker = this.workers[this.nextWorker];
this.nextWorker = (this.nextWorker + 1) % this.workers.length;
// If the result is empty or we have reached the limit or we are canceled, ignore it
if (this.limitReached || this.isCanceled) {
return unwind(size);
}
// Indicate progress to the outside
progress();
let fileMatch: FileMatch = null;
let doneCallback = (error?: Error) => {
if (!error && !this.isCanceled && fileMatch && !fileMatch.isEmpty()) {
onResult(fileMatch.serialize());
}
return unwind(size);
};
const absolutePath = result.base ? [result.base, result.relativePath].join(path.sep) : result.relativePath;
let perLineCallback = (line: string, lineNumber: number) => {
if (this.limitReached || this.isCanceled) {
return; // return early if canceled or limit reached
const maxResults = this.config.maxResults && (this.config.maxResults - this.numResults);
worker.search({ absolutePaths: batch, maxResults }).then(result => {
if (!result || this.limitReached || this.isCanceled) {
return unwind(batchBytes);
}
let lineMatch: LineMatch = null;
let match = this.contentPattern.exec(line);
// Record all matches into file result
while (match !== null && match[0].length > 0 && !this.limitReached && !this.isCanceled) {
resultCounter++;
if (this.maxResults && resultCounter >= this.maxResults) {
this.limitReached = true;
}
if (fileMatch === null) {
fileMatch = new FileMatch(absolutePath);
}
if (lineMatch === null) {
lineMatch = new LineMatch(line, lineNumber);
fileMatch.addMatch(lineMatch);
}
lineMatch.addMatch(match.index, match[0].length);
const matches = result.matches;
this.numResults += result.numMatches;
matches.forEach(m => {
onResult(m);
});
match = this.contentPattern.exec(line);
if (this.config.maxResults && this.numResults >= this.config.maxResults) {
// It's possible to go over maxResults this way, but that is much simpler than trying to trim the exact number
// of file matches, line matches, and matches within a line so that the total equals maxResults exactly.
this.limitReached = true;
}
};
// Read lines buffered to support large files
this.readlinesAsync(absolutePath, perLineCallback, { bufferLength: 8096, encoding: this.fileEncoding }, doneCallback);
}, (error, isLimitHit) => {
this.walkerIsDone = true;
this.walkerError = error;
unwind(0 /* walker is done, indicate this back to our handler to be able to unwind */);
});
}
unwind(batchBytes);
},
error => {
// An error on the worker's end, not in reading the file, but in processing the batch. Log and continue.
onUnexpectedError(error);
unwind(batchBytes);
});
};
private readlinesAsync(filename: string, perLineCallback: (line: string, lineNumber: number) => void, options: ReadLinesOptions, callback: (error: Error) => void): void {
fs.open(filename, 'r', null, (error: Error, fd: number) => {
if (error) {
return callback(error);
// Walk over the file system
let nextBatch = [];
let nextBatchBytes = 0;
const batchFlushBytes = 2 ** 20; // 1MB
this.walker.walk(this.config.rootFolders, this.config.extraFiles, result => {
let bytes = result.size || 1;
this.totalBytes += bytes;
// If we have reached the limit or we are canceled, ignore it
if (this.limitReached || this.isCanceled) {
return unwind(bytes);
}
let buffer = new Buffer(options.bufferLength);
let pos: number;
let i: number;
let line = '';
let lineNumber = 0;
let lastBufferHadTraillingCR = false;
const outer = this;
function decodeBuffer(buffer: NodeBuffer, start: number, end: number): string {
if (options.encoding === UTF8 || options.encoding === UTF8_with_bom) {
return buffer.toString(undefined, start, end); // much faster to use the built-in toString() when encoding is default
}
// Indicate progress to the outside
progress();
return decode(buffer.slice(start, end), options.encoding);
}
const absolutePath = result.base ? [result.base, result.relativePath].join(path.sep) : result.relativePath;
nextBatch.push(absolutePath);
nextBatchBytes += bytes;
function lineFinished(offset: number): void {
line += decodeBuffer(buffer, pos, i + offset);
perLineCallback(line, lineNumber);
line = '';
lineNumber++;
pos = i + offset;
if (nextBatchBytes >= batchFlushBytes) {
run(nextBatch, nextBatchBytes);
nextBatch = [];
nextBatchBytes = 0;
}
function readFile(isFirstRead: boolean, clb: (error: Error) => void): void {
if (outer.limitReached || outer.isCanceled) {
return clb(null); // return early if canceled or limit reached
}, (error, isLimitHit) => {
// Send any remaining paths to a worker, or unwind if we're stopping
if (nextBatch.length) {
if (this.limitReached || this.isCanceled) {
unwind(nextBatchBytes);
} else {
run(nextBatch, nextBatchBytes);
}
fs.read(fd, buffer, 0, buffer.length, null, (error: Error, bytesRead: number, buffer: NodeBuffer) => {
if (error || bytesRead === 0 || outer.limitReached || outer.isCanceled) {
return clb(error); // return early if canceled or limit reached or no more bytes to read
}
pos = 0;
i = 0;
// Detect encoding and mime when this is the beginning of the file
if (isFirstRead) {
let mimeAndEncoding = detectMimeAndEncodingFromBuffer(buffer, bytesRead);
if (mimeAndEncoding.mimes[mimeAndEncoding.mimes.length - 1] !== baseMime.MIME_TEXT) {
return clb(null); // skip files that seem binary
}
// Check for BOM offset
switch (mimeAndEncoding.encoding) {
case UTF8:
pos = i = 3;
options.encoding = UTF8;
break;
case UTF16be:
pos = i = 2;
options.encoding = UTF16be;
break;
case UTF16le:
pos = i = 2;
options.encoding = UTF16le;
break;
}
}
if (lastBufferHadTraillingCR) {
if (buffer[i] === 0x0a) { // LF (Line Feed)
lineFinished(1);
i++;
} else {
lineFinished(0);
}
lastBufferHadTraillingCR = false;
}
for (; i < bytesRead; ++i) {
if (buffer[i] === 0x0a) { // LF (Line Feed)
lineFinished(1);
} else if (buffer[i] === 0x0d) { // CR (Carriage Return)
if (i + 1 === bytesRead) {
lastBufferHadTraillingCR = true;
} else if (buffer[i + 1] === 0x0a) { // LF (Line Feed)
lineFinished(2);
i++;
} else {
lineFinished(1);
}
}
}
line += decodeBuffer(buffer, pos, bytesRead);
readFile(false /* isFirstRead */, clb); // Continue reading
});
}
readFile(true /* isFirstRead */, (error: Error) => {
if (error) {
return callback(error);
}
if (line.length) {
perLineCallback(line, lineNumber); // handle last line
}
fs.close(fd, (error: Error) => {
callback(error);
});
});
this.walkerIsDone = true;
this.walkerError = error;
});
}
}
class FileMatch implements ISerializedFileMatch {
public path: string;
public lineMatches: LineMatch[];
constructor(path: string) {
this.path = path;
this.lineMatches = [];
}
public addMatch(lineMatch: LineMatch): void {
this.lineMatches.push(lineMatch);
}
public isEmpty(): boolean {
return this.lineMatches.length === 0;
}
public serialize(): ISerializedFileMatch {
let lineMatches: ILineMatch[] = [];
for (let i = 0; i < this.lineMatches.length; i++) {
lineMatches.push(this.lineMatches[i].serialize());
private startWorkers(): void {
// If the CPU has hyperthreading enabled, this will report (# of physical cores)*2.
const numWorkers = os.cpus().length;
for (let i = 0; i < numWorkers; i++) {
this.createWorker(i);
}
return {
path: this.path,
lineMatches: lineMatches
};
}
}
class LineMatch implements ILineMatch {
public preview: string;
public lineNumber: number;
public offsetAndLengths: number[][];
constructor(preview: string, lineNumber: number) {
this.preview = preview.replace(/(\r|\n)*$/, '');
this.lineNumber = lineNumber;
this.offsetAndLengths = [];
}
private createWorker(id: number): void {
let client = new Client(
uri.parse(require.toUrl('bootstrap')).fsPath,
{
serverName: 'Search Worker ' + id,
args: ['--type=searchWorker'],
env: {
AMD_ENTRYPOINT: 'vs/workbench/services/search/node/worker/searchWorkerApp',
PIPE_LOGGING: 'true',
VERBOSE_LOGGING: process.env.VERBOSE_LOGGING
}
});
public getText(): string {
return this.preview;
}
// Make async?
const channel = ipc.getNextTickChannel(client.getChannel<ISearchWorkerChannel>('searchWorker'));
const channelClient = new SearchWorkerChannelClient(channel);
const config: ISearchWorkerConfig = { pattern: this.config.contentPattern, id, fileEncoding: this.config.fileEncoding };
channelClient.initialize(config).then(null, onUnexpectedError);
public getLineNumber(): number {
return this.lineNumber;
this.workers.push(channelClient);
this.workerClients.push(client);
}
public addMatch(offset: number, length: number): void {
this.offsetAndLengths.push([offset, length]);
private disposeWorkers(): void {
this.workerClients.forEach(c => c.dispose());
}
public serialize(): ILineMatch {
let result = {
preview: this.preview,
lineNumber: this.lineNumber,
offsetAndLengths: this.offsetAndLengths
};
return result;
}
}
\ No newline at end of file
}
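Aside: the batching above follows a simple pattern — accumulate walked paths until roughly 1MB of pending file content, then hand the batch to the next worker in round-robin order. A minimal standalone sketch of that dispatch loop, with BatchWorker standing in for the real ISearchWorker IPC client (illustrative only, not the actual implementation):

// Sketch of round-robin, byte-batched dispatch; BatchWorker is a hypothetical stand-in.
interface BatchWorker {
    search(paths: string[]): Promise<void>;
}

class RoundRobinDispatcher {
    private nextWorker = 0;
    private batch: string[] = [];
    private batchBytes = 0;
    private static readonly FLUSH_BYTES = 2 ** 20; // same 1MB threshold as batchFlushBytes above

    constructor(private workers: BatchWorker[]) { }

    add(path: string, sizeInBytes: number): void {
        this.batch.push(path);
        this.batchBytes += sizeInBytes;
        if (this.batchBytes >= RoundRobinDispatcher.FLUSH_BYTES) {
            this.flush();
        }
    }

    flush(): void {
        if (!this.batch.length) {
            return;
        }
        // Pick the next worker in round-robin order, as the engine's run() does.
        const worker = this.workers[this.nextWorker];
        this.nextWorker = (this.nextWorker + 1) % this.workers.length;
        worker.search(this.batch);
        this.batch = [];
        this.batchBytes = 0;
    }
}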
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import * as fs from 'fs';
import { onUnexpectedError } from 'vs/base/common/errors';
import * as strings from 'vs/base/common/strings';
import { TPromise } from 'vs/base/common/winjs.base';
import { ISerializedFileMatch } from '../search';
import * as baseMime from 'vs/base/common/mime';
import { ILineMatch } from 'vs/platform/search/common/search';
import { UTF16le, UTF16be, UTF8, UTF8_with_bom, encodingExists, decode } from 'vs/base/node/encoding';
import { detectMimeAndEncodingFromBuffer } from 'vs/base/node/mime';
import { ISearchWorker, ISearchWorkerConfig, ISearchWorkerSearchArgs, ISearchWorkerSearchResult } from './searchWorkerIpc';
interface ReadLinesOptions {
bufferLength: number;
encoding: string;
}
// Global isCanceled flag for the process. It's only set once and this avoids awkwardness in passing it around.
let isCanceled = false;
const MAX_FILE_ERRORS = 5; // Don't report more than this number of errors, 1 per file, to avoid flooding the log when there's a general issue
let numErrorsLogged = 0;
function onError(error: any): void {
if (numErrorsLogged++ < MAX_FILE_ERRORS) {
onUnexpectedError(error);
}
}
export class SearchWorker implements ISearchWorker {
static CONCURRENT_SEARCH_PATHS = 2;
private contentPattern: RegExp;
private nextSearch = TPromise.wrap(null);
private config: ISearchWorkerConfig;
private fileEncoding: string;
initialize(config: ISearchWorkerConfig): TPromise<void> {
this.contentPattern = strings.createRegExp(config.pattern.pattern, config.pattern.isRegExp, { matchCase: config.pattern.isCaseSensitive, wholeWord: config.pattern.isWordMatch, multiline: false, global: true });
this.config = config;
this.fileEncoding = encodingExists(config.fileEncoding) ? config.fileEncoding : UTF8;
return TPromise.wrap<void>(undefined);
}
cancel(): TPromise<void> {
isCanceled = true;
return TPromise.wrap<void>(null);
}
search(args: ISearchWorkerSearchArgs): TPromise<ISearchWorkerSearchResult> {
// Queue this search to run after the current one
return this.nextSearch = this.nextSearch
.then(() => searchBatch(args.absolutePaths, this.contentPattern, this.fileEncoding, args.maxResults));
}
}
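The nextSearch field above acts as a promise queue: each incoming batch is chained onto the tail of the previous one, so a worker processes one batch at a time in arrival order. The same pattern expressed with standard Promises (a sketch, not code from this commit):

// Promise-queue pattern: serialize async jobs by chaining onto the tail.
class JobQueue {
    private tail: Promise<void> = Promise.resolve();

    enqueue<T>(job: () => Promise<T>): Promise<T> {
        const result = this.tail.then(job);
        // Swallow failures when extending the tail so one failed job doesn't block later ones.
        this.tail = result.then(() => undefined, () => undefined);
        return result;
    }
}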
/**
* Searches some number of the given paths concurrently, and starts searches in other paths when those complete.
*/
function searchBatch(absolutePaths: string[], contentPattern: RegExp, fileEncoding: string, maxResults?: number): TPromise<ISearchWorkerSearchResult> {
if (isCanceled) {
return TPromise.wrap(null);
}
return new TPromise(batchDone => {
const result: ISearchWorkerSearchResult = {
matches: [],
numMatches: 0,
limitReached: false
};
// Search in the given path, and when it's finished, search in the next path in absolutePaths
const startSearchInFile = (absolutePath: string): TPromise<void> => {
return searchInFile(absolutePath, contentPattern, fileEncoding, maxResults && (maxResults - result.numMatches)).then(fileResult => {
// Finish early if search is canceled
if (isCanceled) {
return;
}
if (fileResult) {
result.numMatches += fileResult.numMatches;
result.matches.push(fileResult.match.serialize());
if (fileResult.limitReached) {
// If the limit was reached, terminate early with the results so far and cancel in-progress searches.
isCanceled = true;
result.limitReached = true;
return batchDone(result);
}
}
if (absolutePaths.length) {
return startSearchInFile(absolutePaths.shift());
}
}, onError);
};
let batchPromises: TPromise<void>[] = [];
for (let i = 0; i < SearchWorker.CONCURRENT_SEARCH_PATHS && absolutePaths.length; i++) {
batchPromises.push(startSearchInFile(absolutePaths.shift()));
}
TPromise.join(batchPromises).then(() => {
batchDone(result);
});
});
}
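searchBatch caps in-flight work at CONCURRENT_SEARCH_PATHS by starting that many chains, each of which pulls the next path off the shared array when its current file finishes. The same bounded-concurrency idea extracted into a generic helper (a sketch assuming standard Promises rather than TPromise):

// Generic bounded-concurrency runner: at most `limit` tasks in flight at once.
function runWithLimit<T>(items: T[], limit: number, task: (item: T) => Promise<void>): Promise<void> {
    const queue = items.slice();
    const next = (): Promise<void> => {
        const item = queue.shift();
        if (item === undefined) {
            return Promise.resolve(); // queue drained; this lane is done
        }
        return task(item).then(next); // start the next item when this one completes
    };
    const lanes: Promise<void>[] = [];
    for (let i = 0; i < limit && queue.length; i++) {
        lanes.push(next());
    }
    return Promise.all(lanes).then(() => undefined);
}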
interface IFileSearchResult {
match: FileMatch;
numMatches: number;
limitReached?: boolean;
}
function searchInFile(absolutePath: string, contentPattern: RegExp, fileEncoding: string, maxResults?: number): TPromise<IFileSearchResult> {
let fileMatch: FileMatch = null;
let limitReached = false;
let numMatches = 0;
const perLineCallback = (line: string, lineNumber: number) => {
let lineMatch: LineMatch = null;
let match = contentPattern.exec(line);
// Record all matches into file result
while (match !== null && match[0].length > 0 && !isCanceled && !limitReached) {
if (fileMatch === null) {
fileMatch = new FileMatch(absolutePath);
}
if (lineMatch === null) {
lineMatch = new LineMatch(line, lineNumber);
fileMatch.addMatch(lineMatch);
}
lineMatch.addMatch(match.index, match[0].length);
numMatches++;
if (maxResults && numMatches >= maxResults) {
limitReached = true;
}
match = contentPattern.exec(line);
}
};
// Read lines buffered to support large files
return readlinesAsync(absolutePath, perLineCallback, { bufferLength: 8096, encoding: fileEncoding }).then(
() => fileMatch ? { match: fileMatch, limitReached, numMatches } : null);
}
function readlinesAsync(filename: string, perLineCallback: (line: string, lineNumber: number) => void, options: ReadLinesOptions): TPromise<void> {
return new TPromise<void>((resolve, reject) => {
fs.open(filename, 'r', null, (error: Error, fd: number) => {
if (error) {
return reject(error);
}
let buffer = new Buffer(options.bufferLength);
let pos: number;
let i: number;
let line = '';
let lineNumber = 0;
let lastBufferHadTraillingCR = false;
const decodeBuffer = (buffer: NodeBuffer, start: number, end: number): string => {
if (options.encoding === UTF8 || options.encoding === UTF8_with_bom) {
return buffer.toString(undefined, start, end); // much faster to use the built-in toString() when encoding is default
}
return decode(buffer.slice(start, end), options.encoding);
};
const lineFinished = (offset: number): void => {
line += decodeBuffer(buffer, pos, i + offset);
perLineCallback(line, lineNumber);
line = '';
lineNumber++;
pos = i + offset;
};
const readFile = (isFirstRead: boolean, clb: (error: Error) => void): void => {
if (isCanceled) {
return clb(null); // return early if canceled
}
fs.read(fd, buffer, 0, buffer.length, null, (error: Error, bytesRead: number, buffer: NodeBuffer) => {
if (error || bytesRead === 0 || isCanceled) {
return clb(error); // return early on error or cancellation, or when there are no more bytes to read
}
pos = 0;
i = 0;
// Detect encoding and mime when this is the beginning of the file
if (isFirstRead) {
let mimeAndEncoding = detectMimeAndEncodingFromBuffer(buffer, bytesRead);
if (mimeAndEncoding.mimes[mimeAndEncoding.mimes.length - 1] !== baseMime.MIME_TEXT) {
return clb(null); // skip files that seem binary
}
// Check for BOM offset
switch (mimeAndEncoding.encoding) {
case UTF8:
pos = i = 3;
options.encoding = UTF8;
break;
case UTF16be:
pos = i = 2;
options.encoding = UTF16be;
break;
case UTF16le:
pos = i = 2;
options.encoding = UTF16le;
break;
}
}
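// A CR may have been the last byte of the previous read; pair it with a leading LF here
// so a CRLF sequence split across two buffers still counts as a single line break.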
if (lastBufferHadTraillingCR) {
if (buffer[i] === 0x0a) { // LF (Line Feed)
lineFinished(1);
i++;
} else {
lineFinished(0);
}
lastBufferHadTraillingCR = false;
}
for (; i < bytesRead; ++i) {
if (buffer[i] === 0x0a) { // LF (Line Feed)
lineFinished(1);
} else if (buffer[i] === 0x0d) { // CR (Carriage Return)
if (i + 1 === bytesRead) {
lastBufferHadTraillingCR = true;
} else if (buffer[i + 1] === 0x0a) { // LF (Line Feed)
lineFinished(2);
i++;
} else {
lineFinished(1);
}
}
}
line += decodeBuffer(buffer, pos, bytesRead);
readFile(/*isFirstRead=*/false, clb); // Continue reading
});
};
readFile(/*isFirstRead=*/true, (error: Error) => {
if (error) {
return reject(error);
}
if (line.length) {
perLineCallback(line, lineNumber); // handle last line
}
fs.close(fd, (error: Error) => {
if (error) {
reject(error);
} else {
resolve(null);
}
});
});
});
});
}
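For reference, a hypothetical caller of readlinesAsync (the function is module-private here, so this is purely illustrative):

readlinesAsync('/tmp/example.txt', (line, lineNumber) => {
    console.log(`${lineNumber}: ${line}`); // lineNumber is zero-based
}, { bufferLength: 8096, encoding: UTF8 }).then(
    () => console.log('done'),
    err => console.error(err));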
export class FileMatch implements ISerializedFileMatch {
path: string;
lineMatches: LineMatch[];
constructor(path: string) {
this.path = path;
this.lineMatches = [];
}
addMatch(lineMatch: LineMatch): void {
this.lineMatches.push(lineMatch);
}
isEmpty(): boolean {
return this.lineMatches.length === 0;
}
serialize(): ISerializedFileMatch {
let lineMatches: ILineMatch[] = [];
for (let i = 0; i < this.lineMatches.length; i++) {
lineMatches.push(this.lineMatches[i].serialize());
}
return {
path: this.path,
lineMatches
};
}
}
export class LineMatch implements ILineMatch {
preview: string;
lineNumber: number;
offsetAndLengths: number[][];
constructor(preview: string, lineNumber: number) {
this.preview = preview.replace(/(\r|\n)*$/, '');
this.lineNumber = lineNumber;
this.offsetAndLengths = [];
}
getText(): string {
return this.preview;
}
getLineNumber(): number {
return this.lineNumber;
}
addMatch(offset: number, length: number): void {
this.offsetAndLengths.push([offset, length]);
}
serialize(): ILineMatch {
let result = {
preview: this.preview,
lineNumber: this.lineNumber,
offsetAndLengths: this.offsetAndLengths
};
return result;
}
}
\ No newline at end of file
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { Server } from 'vs/base/parts/ipc/node/ipc.cp';
import { SearchWorkerChannel } from './searchWorkerIpc';
import { SearchWorker } from './searchWorker';
const server = new Server();
const worker = new SearchWorker();
const channel = new SearchWorkerChannel(worker);
server.registerChannel('searchWorker', channel);
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { TPromise } from 'vs/base/common/winjs.base';
import { IChannel } from 'vs/base/parts/ipc/common/ipc';
import { ISerializedFileMatch } from '../search';
import { IPatternInfo } from 'vs/platform/search/common/search';
import { SearchWorker } from './searchWorker';
export interface ISearchWorkerConfig {
pattern: IPatternInfo;
fileEncoding: string;
id: number;
}
export interface ISearchWorkerSearchArgs {
absolutePaths: string[];
maxResults?: number;
}
export interface ISearchWorkerSearchResult {
matches: ISerializedFileMatch[];
numMatches: number;
limitReached: boolean;
}
export interface ISearchWorker {
initialize(config: ISearchWorkerConfig): TPromise<void>;
search(args: ISearchWorkerSearchArgs): TPromise<ISearchWorkerSearchResult>;
cancel(): TPromise<void>;
}
export interface ISearchWorkerChannel extends IChannel {
call(command: 'initialize', config: ISearchWorkerConfig): TPromise<void>;
call(command: 'search', args: ISearchWorkerSearchArgs): TPromise<ISearchWorkerSearchResult>;
call(command: 'cancel'): TPromise<void>;
call(command: string, arg?: any): TPromise<any>;
}
export class SearchWorkerChannel implements ISearchWorkerChannel {
constructor(private worker: SearchWorker) {
}
call(command: string, arg?: any): TPromise<any> {
switch (command) {
case 'initialize': return this.worker.initialize(arg);
case 'search': return this.worker.search(arg);
case 'cancel': return this.worker.cancel();
}
}
}
export class SearchWorkerChannelClient implements ISearchWorker {
constructor(private channel: ISearchWorkerChannel) { }
initialize(config: ISearchWorkerConfig): TPromise<void> {
return this.channel.call('initialize', config);
}
search(args: ISearchWorkerSearchArgs): TPromise<ISearchWorkerSearchResult> {
return this.channel.call('search', args);
}
cancel(): TPromise<void> {
return this.channel.call('cancel');
}
}
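To make the wire format concrete, here is what one worker reply could look like under these interfaces (all values invented for illustration; note that lineNumber is zero-based, as produced by readlinesAsync):

const exampleResult: ISearchWorkerSearchResult = {
    matches: [{
        path: '/workspace/src/app.ts',          // hypothetical file
        lineMatches: [{
            preview: 'const searchTerm = "foo";',
            lineNumber: 3,                      // zero-based line index
            offsetAndLengths: [[6, 10]]         // one match at character offset 6, length 10
        }]
    }],
    numMatches: 1,
    limitReached: false
};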
@@ -803,7 +803,10 @@ suite('Search', () => {
}
}, (result) => { }, (error) => {
assert.ok(!error);
assert.equal(c, 520);
// Search can go over maxResults because the results from the worker processes aren't trimmed to the exact max size,
// but the worst-case scenario should be 2*max-1.
assert.ok(c < 520 * 2);
done();
});
});
......
@@ -7,6 +7,7 @@
import 'vs/workbench/parts/search/browser/search.contribution'; // load contributions
import * as assert from 'assert';
import * as fs from 'fs';
import { WorkspaceContextService, IWorkspaceContextService } from 'vs/platform/workspace/common/workspace';
import { createSyncDescriptor } from 'vs/platform/instantiation/common/descriptors';
import { IEditorGroupService } from 'vs/workbench/services/group/common/groupService';
@@ -50,6 +51,9 @@ suite('TextSearch performance', () => {
const argv = minimist(process.argv);
const testWorkspaceArg = argv['testWorkspace'];
const testWorkspacePath = testWorkspaceArg ? path.resolve(testWorkspaceArg) : __dirname;
if (!fs.existsSync(testWorkspacePath)) {
throw new Error(`--testWorkspace doesn't exist`);
}
const telemetryService = new TestTelemetryService();
const configurationService = new SimpleConfigurationService();
......