From a2c8ae7f5e320a48327ddc8cc07b48a96e5bc24a Mon Sep 17 00:00:00 2001
From: Phodal Huang
Date: Tue, 8 Sep 2020 13:15:00 +0800
Subject: [PATCH] test: add simplify code for index

---
 vscode-tests/index.js | 58 ++++++++++++++++++++++++-------------------
 1 file changed, 32 insertions(+), 26 deletions(-)

diff --git a/vscode-tests/index.js b/vscode-tests/index.js
index 06f6d3f..ab04059 100644
--- a/vscode-tests/index.js
+++ b/vscode-tests/index.js
@@ -8,30 +8,36 @@ function readFile(path) {
   })
 }
 
-// Create a registry that can create a grammar from a scope name.
-const registry = new vsctm.Registry({
-  onigLib: Promise.resolve({
-    createOnigScanner: (sources) => new oniguruma.OnigScanner(sources),
-    createOnigString: (str) => new oniguruma.OnigString(str)
-  }),
-  loadGrammar: (scopeName) => {
-    return readFile('./syntaxes/json/c.json').then(data => vsctm.parseRawGrammar(data.toString(), "c.json"))
-  }
-});
+let promise = readFile('./syntaxes/json/c.json').then(data => vsctm.parseRawGrammar(data.toString(), "c.json"));
+promise.then((grammar) => {
+  console.log(grammar.patterns.length);
+})
 
-registry.loadGrammar('source.c').then(grammar => {
-  const text = `#include `.split("\n");
-  let ruleStack = vsctm.INITIAL;
-  for (let i = 0; i < text.length; i++) {
-    const line = text[i];
-    const lineTokens = grammar.tokenizeLine(line, ruleStack);
-    for (let j = 0; j < lineTokens.tokens.length; j++) {
-      const token = lineTokens.tokens[j];
-      console.log(` - token from ${token.startIndex} to ${token.endIndex} ` +
-        `(${line.substring(token.startIndex, token.endIndex)}) ` +
-        `with scopes ${token.scopes.join(', ')}`
-      );
-    }
-    ruleStack = lineTokens.ruleStack;
-  }
-});
+//
+// // Create a registry that can create a grammar from a scope name.
+// const registry = new vsctm.Registry({
+//   onigLib: Promise.resolve({
+//     createOnigScanner: (sources) => new oniguruma.OnigScanner(sources),
+//     createOnigString: (str) => new oniguruma.OnigString(str)
+//   }),
+//   loadGrammar: (scopeName) => {
+//     return readFile('./syntaxes/json/c.json').then(data => vsctm.parseRawGrammar(data.toString(), "c.json"))
+//   }
+// });
+//
+// registry.loadGrammar('source.c').then(grammar => {
+//   const text = `#include `.split("\n");
+//   let ruleStack = vsctm.INITIAL;
+//   for (let i = 0; i < text.length; i++) {
+//     const line = text[i];
+//     const lineTokens = grammar.tokenizeLine(line, ruleStack);
+//     for (let j = 0; j < lineTokens.tokens.length; j++) {
+//       const token = lineTokens.tokens[j];
+//       console.log(` - token from ${token.startIndex} to ${token.endIndex} ` +
+//         `(${line.substring(token.startIndex, token.endIndex)}) ` +
+//         `with scopes ${token.scopes.join(', ')}`
+//       );
+//     }
+//     ruleStack = lineTokens.ruleStack;
+//   }
+// });
-- 
GitLab
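
For reference, a minimal sketch of what the simplified vscode-tests/index.js might look like as a whole after this patch. The require statements at the top of the file and the body of the readFile helper are outside the hunk, so they are assumed here to follow the usual vscode-textmate setup (fs plus vscode-textmate); only the grammar-loading lines are taken verbatim from the diff.

const fs = require('fs');
const vsctm = require('vscode-textmate');

// Assumed helper: the hunk only shows its closing braces as context lines,
// so this Promise wrapper around fs.readFile is a guess at the body.
function readFile(path) {
  return new Promise((resolve, reject) => {
    fs.readFile(path, (error, data) => error ? reject(error) : resolve(data));
  })
}

// Taken from the patch: parse the raw C grammar with vsctm.parseRawGrammar
// and log how many top-level patterns it declares.
let promise = readFile('./syntaxes/json/c.json').then(data => vsctm.parseRawGrammar(data.toString(), "c.json"));
promise.then((grammar) => {
  console.log(grammar.patterns.length);
})

Unlike the commented-out registry version, this sketch never builds a Registry or tokenizes text; it only checks that the raw grammar file parses and reports its pattern count.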