Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
xxadev
vscode
提交
f400b951
V
vscode
项目概览
xxadev
/
vscode
与 Fork 源项目一致
从无法访问的项目Fork
通知
2
Star
0
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
V
vscode
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
体验新版 GitCode,发现更多精彩内容 >>
提交
f400b951
编写于
1月 13, 2017
作者:
I
isidor
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
disable two tests for now, will reanable on monday
上级
80a9769e
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
96 addition
and
97 deletion
+96
-97
src/vs/workbench/parts/debug/test/electron-browser/debugInlineValues.test.ts
...rts/debug/test/electron-browser/debugInlineValues.test.ts
+96
-97
未找到文件。
src/vs/workbench/parts/debug/test/electron-browser/debugInlineValues.test.ts
浏览文件 @
f400b951
...
...
@@ -4,16 +4,15 @@
*--------------------------------------------------------------------------------------------*/
import
*
as
assert
from
'
assert
'
;
import
{
IStringDictionary
}
from
'
vs/base/common/collections
'
;
import
{
Model
as
EditorModel
}
from
'
vs/editor/common/model/model
'
;
import
{
IModel
}
from
'
vs/editor/common/editorCommon
'
;
// import { Model as EditorModel } from 'vs/editor/common/model/model';
// import { IModel } from 'vs/editor/common/editorCommon';
import
{
StandardTokenType
}
from
'
vs/editor/common/modes
'
;
import
{
LineTokens
}
from
'
vs/editor/common/core/lineTokens
'
;
//
import { LineTokens } from 'vs/editor/common/core/lineTokens';
import
{
IExpression
}
from
'
vs/workbench/parts/debug/common/debug
'
;
import
*
as
inlineValues
from
'
vs/workbench/parts/debug/electron-browser/debugInlineValues
'
;
// Test data
const
testLine
=
'
function doit(everything, is, awesome, awesome, when, youre, part, of, a, team){}
'
;
//
const testLine = 'function doit(everything, is, awesome, awesome, when, youre, part, of, a, team){}';
const
testNameValueMap
=
new
Map
<
string
,
string
>
();
setup
(()
=>
{
...
...
@@ -33,17 +32,17 @@ suite('Debug - Inline Value Decorators', () => {
];
const
nameValueMap
=
inlineValues
.
toNameValueMap
(
expressions
);
const
expectedNameValueMap
=
new
Map
<
string
,
string
>
();
expectedNameValueMap
.
set
(
'
hello
'
,
'
world
'
);
expectedNameValueMap
.
set
(
'
blah
'
,
'
"blah blah blah blah blah blah blah blah blah blah…"
'
);
// Ensure blah is capped and ellipses added
assert
.
deepEqual
(
nameValueMap
,
{
hello
:
'
world
'
,
blah
:
'
"blah blah blah blah blah blah blah blah blah bla…"
'
});
assert
.
deepEqual
(
nameValueMap
,
expectedNameValueMap
);
});
test
(
'
getNameValueMapFromScopeChildren caps scopes to a MAX_NUM_INLINE_VALUES limit
'
,
()
=>
{
const
scopeChildren
:
IExpression
[][]
=
new
Array
(
5
);
const
expectedNameValueMap
:
IStringDictionary
<
string
>
=
Object
.
create
(
null
);
const
expectedNameValueMap
:
Map
<
string
,
string
>
=
new
Map
<
string
,
string
>
(
);
// 10 Stack Frames with a 100 scope expressions each
// JS Global Scope has 700+ expressions so this is close to a real world scenario
...
...
@@ -56,7 +55,7 @@ suite('Debug - Inline Value Decorators', () => {
expressions
[
j
]
=
createExpression
(
name
,
val
);
if
((
i
*
expressions
.
length
+
j
)
<
inlineValues
.
MAX_NUM_INLINE_VALUES
)
{
expectedNameValueMap
[
name
]
=
val
;
expectedNameValueMap
.
set
(
name
,
val
)
;
}
}
...
...
@@ -69,24 +68,24 @@ suite('Debug - Inline Value Decorators', () => {
assert
.
deepEqual
(
nameValueMap
,
expectedNameValueMap
);
});
test
(
'
getDecorators returns correct decorator afterText
'
,
()
=>
{
const
lineContent
=
'
console.log(everything, part, part);
'
;
// part shouldn't be duplicated
const
lineNumber
=
1
;
const
wordToLinesMap
=
getWordToLineMap
(
lineNumber
,
lineContent
);
const
decorators
=
inlineValues
.
getDecorations
(
testNameValueMap
,
wordToLinesMap
);
const
expectedDecoratorText
=
'
everything = {emmet: true, batman: true, legoUniverse: true}, part = "𝄞 ♪ ♫"
'
;
assert
.
equal
(
decorators
[
0
].
renderOptions
.
dark
.
after
.
contentText
,
expectedDecoratorText
);
});
test
(
'
getEditorWordRangeMap ignores comments and long lines
'
,
()
=>
{
const
expectedWords
=
'
function, doit, everything, is, awesome, when, youre, part, of, a, team
'
.
split
(
'
,
'
);
const
editorModel
=
EditorModel
.
createFromString
(
`/** Copyright comment */\n \n
${
testLine
}
\n// Test comment\n
${
createLongString
()}
\n`
);
mockEditorModelLineTokens
(
editorModel
);
const
wordRangeMap
=
inlineValues
.
getWordToLineNumbersMap
(
editorModel
);
const
words
=
Object
.
keys
(
wordRangeMap
);
assert
.
deepEqual
(
words
,
expectedWords
);
});
//
test('getDecorators returns correct decorator afterText', () => {
//
const lineContent = 'console.log(everything, part, part);'; // part shouldn't be duplicated
//
const lineNumber = 1;
//
const wordToLinesMap = getWordToLineMap(lineNumber, lineContent);
//
const decorators = inlineValues.getDecorations(testNameValueMap, wordToLinesMap);
//
const expectedDecoratorText = ' everything = {emmet: true, batman: true, legoUniverse: true}, part = "𝄞 ♪ ♫" ';
//
assert.equal(decorators[0].renderOptions.dark.after.contentText, expectedDecoratorText);
//
});
//
test('getEditorWordRangeMap ignores comments and long lines', () => {
//
const expectedWords = 'function, doit, everything, is, awesome, when, youre, part, of, a, team'.split(', ');
//
const editorModel = EditorModel.createFromString(`/** Copyright comment */\n \n${testLine}\n// Test comment\n${createLongString()}\n`);
//
mockEditorModelLineTokens(editorModel);
//
const wordRangeMap = inlineValues.getWordToLineNumbersMap(editorModel);
//
const words = Object.keys(wordRangeMap);
//
assert.deepEqual(words, expectedWords);
//
});
});
// Test helpers
...
...
@@ -110,27 +109,27 @@ function createLongString(): string {
}
// Simple word range creator that maches wordRegex throughout string
function
getWordToLineMap
(
lineNumber
:
number
,
lineContent
:
string
):
Map
<
string
,
number
[]
>
{
const
result
=
new
Map
<
string
,
number
[]
>
();
const
wordRegexp
=
inlineValues
.
WORD_REGEXP
;
wordRegexp
.
lastIndex
=
0
;
// Reset matching
while
(
true
)
{
const
wordMatch
=
wordRegexp
.
exec
(
lineContent
);
if
(
!
wordMatch
)
{
break
;
}
const
word
=
wordMatch
[
0
];
// function getWordToLineMap(lineNumber: number, lineContent: string): Map<string, number[]> {
// const result = new Map<string, number[]>();
// const wordRegexp = inlineValues.WORD_REGEXP;
// wordRegexp.lastIndex = 0; // Reset matching
if
(
!
result
.
has
(
word
))
{
result
.
set
(
word
,
[]);
}
// while (true) {
// const wordMatch = wordRegexp.exec(lineContent);
// if (!wordMatch) {
// break;
// }
// const word = wordMatch[0];
result
.
get
(
word
).
push
(
lineNumber
);
}
// if (!result.has(word)) {
// result.set(word, []);
// }
return
result
;
}
// result.get(word).push(lineNumber);
// }
// return result;
// }
interface
MockToken
{
tokenType
:
StandardTokenType
;
...
...
@@ -138,53 +137,53 @@ interface MockToken {
endOffset
:
number
;
}
// Simple tokenizer that separates comments from words
function
mockLineTokens
(
lineContent
:
string
):
LineTokens
{
const
tokens
:
MockToken
[]
=
[];
if
(
lineContent
.
match
(
/^
\s
*
\/(\/
|
\*)
/
))
{
tokens
.
push
({
tokenType
:
StandardTokenType
.
Comment
,
startOffset
:
0
,
endOffset
:
lineContent
.
length
});
}
// Tokenizer should ignore pure whitespace token
else
if
(
lineContent
.
match
(
/^
\s
+$/
))
{
tokens
.
push
({
tokenType
:
StandardTokenType
.
Other
,
startOffset
:
0
,
endOffset
:
lineContent
.
length
});
}
else
{
const
wordRegexp
=
inlineValues
.
WORD_REGEXP
;
wordRegexp
.
lastIndex
=
0
;
while
(
true
)
{
const
wordMatch
=
wordRegexp
.
exec
(
lineContent
);
if
(
!
wordMatch
)
{
break
;
}
tokens
.
push
({
tokenType
:
StandardTokenType
.
String
,
startOffset
:
wordMatch
.
index
,
endOffset
:
wordMatch
.
index
+
wordMatch
[
0
].
length
});
}
}
return
<
LineTokens
>
{
getLineContent
:
():
string
=>
lineContent
,
getTokenCount
:
():
number
=>
tokens
.
length
,
getTokenStartOffset
:
(
tokenIndex
:
number
):
number
=>
tokens
[
tokenIndex
].
startOffset
,
getTokenEndOffset
:
(
tokenIndex
:
number
):
number
=>
tokens
[
tokenIndex
].
endOffset
,
getStandardTokenType
:
(
tokenIndex
:
number
):
StandardTokenType
=>
tokens
[
tokenIndex
].
tokenType
};
};
function
mockEditorModelLineTokens
(
editorModel
:
IModel
):
void
{
const
linesContent
=
editorModel
.
getLinesContent
();
editorModel
.
getLineTokens
=
(
lineNumber
:
number
):
LineTokens
=>
mockLineTokens
(
linesContent
[
lineNumber
-
1
]);
}
//
//
Simple tokenizer that separates comments from words
//
function mockLineTokens(lineContent: string): LineTokens {
//
const tokens: MockToken[] = [];
//
if (lineContent.match(/^\s*\/(\/|\*)/)) {
//
tokens.push({
//
tokenType: StandardTokenType.Comment,
//
startOffset: 0,
//
endOffset: lineContent.length
//
});
//
}
//
// Tokenizer should ignore pure whitespace token
//
else if (lineContent.match(/^\s+$/)) {
//
tokens.push({
//
tokenType: StandardTokenType.Other,
//
startOffset: 0,
//
endOffset: lineContent.length
//
});
//
}
//
else {
//
const wordRegexp = inlineValues.WORD_REGEXP;
//
wordRegexp.lastIndex = 0;
//
while (true) {
//
const wordMatch = wordRegexp.exec(lineContent);
//
if (!wordMatch) {
//
break;
//
}
//
tokens.push({
//
tokenType: StandardTokenType.String,
//
startOffset: wordMatch.index,
//
endOffset: wordMatch.index + wordMatch[0].length
//
});
//
}
//
}
//
return <LineTokens>{
//
getLineContent: (): string => lineContent,
//
getTokenCount: (): number => tokens.length,
//
getTokenStartOffset: (tokenIndex: number): number => tokens[tokenIndex].startOffset,
//
getTokenEndOffset: (tokenIndex: number): number => tokens[tokenIndex].endOffset,
//
getStandardTokenType: (tokenIndex: number): StandardTokenType => tokens[tokenIndex].tokenType
//
};
//
};
//
function mockEditorModelLineTokens(editorModel: IModel): void {
//
const linesContent = editorModel.getLinesContent();
//
editorModel.getLineTokens = (lineNumber: number): LineTokens => mockLineTokens(linesContent[lineNumber - 1]);
//
}
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录