0){if(!i.compareByGeneratedPositionsInflated(n,h[f-1]))continue;e+=","}e+=o.encode(n.generatedColumn-s),s=n.generatedColumn,null!=n.source&&(t=this._sources.indexOf(n.source),e+=o.encode(t-g),g=t,e+=o.encode(n.originalLine-1-l),l=n.originalLine-1,e+=o.encode(n.originalColumn-u),u=n.originalColumn,null!=n.name&&(r=this._names.indexOf(n.name),e+=o.encode(r-c),c=r)),p+=e}return p},t.prototype._generateSourcesContent=function(e,n){return e.map(function(e){if(!this._sourcesContents)return null;null!=n&&(e=i.relative(n,e));var r=i.toSetString(e);return Object.prototype.hasOwnProperty.call(this._sourcesContents,r)?this._sourcesContents[r]:null},this)},t.prototype.toJSON=function(){var e={version:this._version,sources:this._sources.toArray(),names:this._names.toArray(),mappings:this._serializeMappings()};return null!=this._file&&(e.file=this._file),null!=this._sourceRoot&&(e.sourceRoot=this._sourceRoot),this._sourcesContents&&(e.sourcesContent=this._generateSourcesContent(e.sources,e.sourceRoot)),e},t.prototype.toString=function(){return JSON.stringify(this.toJSON())},n.SourceMapGenerator=t},function(e,n,r){function t(e){return e<0?(-e<<1)+1:(e<<1)+0}function o(e){var n=1===(1&e),r=e>>1;return n?-r:r}var i=r(3),s=5,a=1<>>=s,o>0&&(n|=l),r+=i.encode(n);while(o>0);return r},n.decode=function(e,n,r){var t,a,c=e.length,g=0,p=0;do{if(n>=c)throw new Error("Expected more digits in base 64 VLQ value.");if(a=i.decode(e.charCodeAt(n++)),a===-1)throw new Error("Invalid base64 digit: "+e.charAt(n-1));t=!!(a&l),a&=u,g+=a<=0;c--)s=u[c],"."===s?u.splice(c,1):".."===s?l++:l>0&&(""===s?(u.splice(c+1,l),l=0):(u.splice(c,2),l--));return r=u.join("/"),""===r&&(r=a?"/":"."),i?(i.path=r,o(i)):r}function s(e,n){""===e&&(e="."),""===n&&(n=".");var r=t(n),s=t(e);if(s&&(e=s.path||"/"),r&&!r.scheme)return s&&(r.scheme=s.scheme),o(r);if(r||n.match(y))return n;if(s&&!s.host&&!s.path)return s.host=n,o(s);var a="/"===n.charAt(0)?n:i(e.replace(/\/+$/,"")+"/"+n);return s?(s.path=a,o(s)):a}function a(e,n){""===e&&(e="."),e=e.replace(/\/$/,"");for(var r=0;0!==n.indexOf(e+"/");){var t=e.lastIndexOf("/");if(t<0)return n;if(e=e.slice(0,t),e.match(/^([^\/]+:\/)?\/*$/))return n;++r}return Array(r+1).join("../")+n.substr(e.length+1)}function u(e){return e}function l(e){return g(e)?"$"+e:e}function c(e){return g(e)?e.slice(1):e}function g(e){if(!e)return!1;var n=e.length;if(n<9)return!1;if(95!==e.charCodeAt(n-1)||95!==e.charCodeAt(n-2)||111!==e.charCodeAt(n-3)||116!==e.charCodeAt(n-4)||111!==e.charCodeAt(n-5)||114!==e.charCodeAt(n-6)||112!==e.charCodeAt(n-7)||95!==e.charCodeAt(n-8)||95!==e.charCodeAt(n-9))return!1;for(var r=n-10;r>=0;r--)if(36!==e.charCodeAt(r))return!1;return!0}function p(e,n,r){var t=f(e.source,n.source);return 0!==t?t:(t=e.originalLine-n.originalLine,0!==t?t:(t=e.originalColumn-n.originalColumn,0!==t||r?t:(t=e.generatedColumn-n.generatedColumn,0!==t?t:(t=e.generatedLine-n.generatedLine,0!==t?t:f(e.name,n.name)))))}function h(e,n,r){var t=e.generatedLine-n.generatedLine;return 0!==t?t:(t=e.generatedColumn-n.generatedColumn,0!==t||r?t:(t=f(e.source,n.source),0!==t?t:(t=e.originalLine-n.originalLine,0!==t?t:(t=e.originalColumn-n.originalColumn,0!==t?t:f(e.name,n.name)))))}function f(e,n){return e===n?0:null===e?1:null===n?-1:e>n?1:-1}function d(e,n){var r=e.generatedLine-n.generatedLine;return 0!==r?r:(r=e.generatedColumn-n.generatedColumn,0!==r?r:(r=f(e.source,n.source),0!==r?r:(r=e.originalLine-n.originalLine,0!==r?r:(r=e.originalColumn-n.originalColumn,0!==r?r:f(e.name,n.name)))))}function m(e){return 
JSON.parse(e.replace(/^\)]}'[^\n]*\n/,""))}function _(e,n,r){if(n=n||"",e&&("/"!==e[e.length-1]&&"/"!==n[0]&&(e+="/"),n=e+n),r){var a=t(r);if(!a)throw new Error("sourceMapURL could not be parsed");if(a.path){var u=a.path.lastIndexOf("/");u>=0&&(a.path=a.path.substring(0,u+1))}n=s(o(a),n)}return i(n)}n.getArg=r;var v=/^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/,y=/^data:.+\,.+$/;n.urlParse=t,n.urlGenerate=o,n.normalize=i,n.join=s,n.isAbsolute=function(e){return"/"===e.charAt(0)||v.test(e)},n.relative=a;var C=function(){var e=Object.create(null);return!("__proto__"in e)}();n.toSetString=C?u:l,n.fromSetString=C?u:c,n.compareByOriginalPositions=p,n.compareByGeneratedPositionsDeflated=h,n.compareByGeneratedPositionsInflated=d,n.parseSourceMapInput=m,n.computeSourceURL=_},function(e,n,r){function t(){this._array=[],this._set=s?new Map:Object.create(null)}var o=r(4),i=Object.prototype.hasOwnProperty,s="undefined"!=typeof Map;t.fromArray=function(e,n){for(var r=new t,o=0,i=e.length;o=0)return n}else{var r=o.toSetString(e);if(i.call(this._set,r))return this._set[r]}throw new Error('"'+e+'" is not in the set.')},t.prototype.at=function(e){if(e>=0&&er||t==r&&s>=o||i.compareByGeneratedPositionsInflated(e,n)<=0}function o(){this._array=[],this._sorted=!0,this._last={generatedLine:-1,generatedColumn:0}}var i=r(4);o.prototype.unsortedForEach=function(e,n){this._array.forEach(e,n)},o.prototype.add=function(e){t(this._last,e)?(this._last=e,this._array.push(e)):(this._sorted=!1,this._array.push(e))},o.prototype.toArray=function(){return this._sorted||(this._array.sort(i.compareByGeneratedPositionsInflated),this._sorted=!0),this._array},n.MappingList=o},function(e,n,r){function t(e,n){var r=e;return"string"==typeof e&&(r=a.parseSourceMapInput(e)),null!=r.sections?new s(r,n):new o(r,n)}function o(e,n){var r=e;"string"==typeof e&&(r=a.parseSourceMapInput(e));var t=a.getArg(r,"version"),o=a.getArg(r,"sources"),i=a.getArg(r,"names",[]),s=a.getArg(r,"sourceRoot",null),u=a.getArg(r,"sourcesContent",null),c=a.getArg(r,"mappings"),g=a.getArg(r,"file",null);if(t!=this._version)throw new Error("Unsupported version: "+t);s&&(s=a.normalize(s)),o=o.map(String).map(a.normalize).map(function(e){return s&&a.isAbsolute(s)&&a.isAbsolute(e)?a.relative(s,e):e}),this._names=l.fromArray(i.map(String),!0),this._sources=l.fromArray(o,!0),this._absoluteSources=this._sources.toArray().map(function(e){return a.computeSourceURL(s,e,n)}),this.sourceRoot=s,this.sourcesContent=u,this._mappings=c,this._sourceMapURL=n,this.file=g}function i(){this.generatedLine=0,this.generatedColumn=0,this.source=null,this.originalLine=null,this.originalColumn=null,this.name=null}function s(e,n){var r=e;"string"==typeof e&&(r=a.parseSourceMapInput(e));var o=a.getArg(r,"version"),i=a.getArg(r,"sections");if(o!=this._version)throw new Error("Unsupported version: "+o);this._sources=new l,this._names=new l;var s={line:-1,column:0};this._sections=i.map(function(e){if(e.url)throw new Error("Support for url field in sections not implemented.");var r=a.getArg(e,"offset"),o=a.getArg(r,"line"),i=a.getArg(r,"column");if(o=0){var i=this._originalMappings[o];if(void 0===e.column)for(var s=i.originalLine;i&&i.originalLine===s;)t.push({line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o];else for(var 
l=i.originalColumn;i&&i.originalLine===n&&i.originalColumn==l;)t.push({line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o]}return t},n.SourceMapConsumer=t,o.prototype=Object.create(t.prototype),o.prototype.consumer=t,o.prototype._findSourceIndex=function(e){var n=e;if(null!=this.sourceRoot&&(n=a.relative(this.sourceRoot,n)),this._sources.has(n))return this._sources.indexOf(n);var r;for(r=0;r1&&(r.source=d+o[1],d+=o[1],r.originalLine=h+o[2],h=r.originalLine,r.originalLine+=1,r.originalColumn=f+o[3],f=r.originalColumn,o.length>4&&(r.name=m+o[4],m+=o[4])),A.push(r),"number"==typeof r.originalLine&&S.push(r)}g(A,a.compareByGeneratedPositionsDeflated),this.__generatedMappings=A,g(S,a.compareByOriginalPositions),this.__originalMappings=S},o.prototype._findMapping=function(e,n,r,t,o,i){if(e[r]<=0)throw new TypeError("Line must be greater than or equal to 1, got "+e[r]);if(e[t]<0)throw new TypeError("Column must be greater than or equal to 0, got "+e[t]);return u.search(e,n,o,i)},o.prototype.computeColumnSpans=function(){for(var e=0;e=0){var o=this._generatedMappings[r];if(o.generatedLine===n.generatedLine){var i=a.getArg(o,"source",null);null!==i&&(i=this._sources.at(i),i=a.computeSourceURL(this.sourceRoot,i,this._sourceMapURL));var s=a.getArg(o,"name",null);return null!==s&&(s=this._names.at(s)),{source:i,line:a.getArg(o,"originalLine",null),column:a.getArg(o,"originalColumn",null),name:s}}}return{source:null,line:null,column:null,name:null}},o.prototype.hasContentsOfAllSources=function(){return!!this.sourcesContent&&(this.sourcesContent.length>=this._sources.size()&&!this.sourcesContent.some(function(e){return null==e}))},o.prototype.sourceContentFor=function(e,n){if(!this.sourcesContent)return null;var r=this._findSourceIndex(e);if(r>=0)return this.sourcesContent[r];var t=e;null!=this.sourceRoot&&(t=a.relative(this.sourceRoot,t));var o;if(null!=this.sourceRoot&&(o=a.urlParse(this.sourceRoot))){var i=t.replace(/^file:\/\//,"");if("file"==o.scheme&&this._sources.has(i))return this.sourcesContent[this._sources.indexOf(i)];if((!o.path||"/"==o.path)&&this._sources.has("/"+t))return this.sourcesContent[this._sources.indexOf("/"+t)]}if(n)return null;throw new Error('"'+t+'" is not in the SourceMap.')},o.prototype.generatedPositionFor=function(e){var n=a.getArg(e,"source");if(n=this._findSourceIndex(n),n<0)return{line:null,column:null,lastColumn:null};var r={source:n,originalLine:a.getArg(e,"line"),originalColumn:a.getArg(e,"column")},o=this._findMapping(r,this._originalMappings,"originalLine","originalColumn",a.compareByOriginalPositions,a.getArg(e,"bias",t.GREATEST_LOWER_BOUND));if(o>=0){var i=this._originalMappings[o];if(i.source===r.source)return{line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}}return{line:null,column:null,lastColumn:null}},n.BasicSourceMapConsumer=o,s.prototype=Object.create(t.prototype),s.prototype.constructor=t,s.prototype._version=3,Object.defineProperty(s.prototype,"sources",{get:function(){for(var e=[],n=0;n0?t-u>1?r(u,t,o,i,s,a):a==n.LEAST_UPPER_BOUND?t1?r(e,u,o,i,s,a):a==n.LEAST_UPPER_BOUND?u:e<0?-1:e}n.GREATEST_LOWER_BOUND=1,n.LEAST_UPPER_BOUND=2,n.search=function(e,t,o,i){if(0===t.length)return-1;var s=r(-1,t.length,e,t,o,i||n.GREATEST_LOWER_BOUND);if(s<0)return-1;for(;s-1>=0&&0===o(t[s],t[s-1],!0);)--s;return s}},function(e,n){function r(e,n,r){var t=e[n];e[n]=e[r],e[r]=t}function 
t(e,n){return Math.round(e+Math.random()*(n-e))}function o(e,n,i,s){if(i=0;n--)this.prepend(e[n]);else{if(!e[u]&&"string"!=typeof e)throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got "+e);this.children.unshift(e)}return this},t.prototype.walk=function(e){for(var n,r=0,t=this.children.length;r0){for(n=[],r=0;r 0 && aGenerated.column >= 0\n\t && !aOriginal && !aSource && !aName) {\n\t // Case 1.\n\t return;\n\t }\n\t else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated\n\t && aOriginal && 'line' in aOriginal && 'column' in aOriginal\n\t && aGenerated.line > 0 && aGenerated.column >= 0\n\t && aOriginal.line > 0 && aOriginal.column >= 0\n\t && aSource) {\n\t // Cases 2 and 3.\n\t return;\n\t }\n\t else {\n\t throw new Error('Invalid mapping: ' + JSON.stringify({\n\t generated: aGenerated,\n\t source: aSource,\n\t original: aOriginal,\n\t name: aName\n\t }));\n\t }\n\t };\n\t\n\t/**\n\t * Serialize the accumulated mappings in to the stream of base 64 VLQs\n\t * specified by the source map format.\n\t */\n\tSourceMapGenerator.prototype._serializeMappings =\n\t function SourceMapGenerator_serializeMappings() {\n\t var previousGeneratedColumn = 0;\n\t var previousGeneratedLine = 1;\n\t var previousOriginalColumn = 0;\n\t var previousOriginalLine = 0;\n\t var previousName = 0;\n\t var previousSource = 0;\n\t var result = '';\n\t var next;\n\t var mapping;\n\t var nameIdx;\n\t var sourceIdx;\n\t\n\t var mappings = this._mappings.toArray();\n\t for (var i = 0, len = mappings.length; i < len; i++) {\n\t mapping = mappings[i];\n\t next = ''\n\t\n\t if (mapping.generatedLine !== previousGeneratedLine) {\n\t previousGeneratedColumn = 0;\n\t while (mapping.generatedLine !== previousGeneratedLine) {\n\t next += ';';\n\t previousGeneratedLine++;\n\t }\n\t }\n\t else {\n\t if (i > 0) {\n\t if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) {\n\t continue;\n\t }\n\t next += ',';\n\t }\n\t }\n\t\n\t next += base64VLQ.encode(mapping.generatedColumn\n\t - previousGeneratedColumn);\n\t previousGeneratedColumn = mapping.generatedColumn;\n\t\n\t if (mapping.source != null) {\n\t sourceIdx = this._sources.indexOf(mapping.source);\n\t next += base64VLQ.encode(sourceIdx - previousSource);\n\t previousSource = sourceIdx;\n\t\n\t // lines are stored 0-based in SourceMap spec version 3\n\t next += base64VLQ.encode(mapping.originalLine - 1\n\t - previousOriginalLine);\n\t previousOriginalLine = mapping.originalLine - 1;\n\t\n\t next += base64VLQ.encode(mapping.originalColumn\n\t - previousOriginalColumn);\n\t previousOriginalColumn = mapping.originalColumn;\n\t\n\t if (mapping.name != null) {\n\t nameIdx = this._names.indexOf(mapping.name);\n\t next += base64VLQ.encode(nameIdx - previousName);\n\t previousName = nameIdx;\n\t }\n\t }\n\t\n\t result += next;\n\t }\n\t\n\t return result;\n\t };\n\t\n\tSourceMapGenerator.prototype._generateSourcesContent =\n\t function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) {\n\t return aSources.map(function (source) {\n\t if (!this._sourcesContents) {\n\t return null;\n\t }\n\t if (aSourceRoot != null) {\n\t source = util.relative(aSourceRoot, source);\n\t }\n\t var key = util.toSetString(source);\n\t return Object.prototype.hasOwnProperty.call(this._sourcesContents, key)\n\t ? 
this._sourcesContents[key]\n\t : null;\n\t }, this);\n\t };\n\t\n\t/**\n\t * Externalize the source map.\n\t */\n\tSourceMapGenerator.prototype.toJSON =\n\t function SourceMapGenerator_toJSON() {\n\t var map = {\n\t version: this._version,\n\t sources: this._sources.toArray(),\n\t names: this._names.toArray(),\n\t mappings: this._serializeMappings()\n\t };\n\t if (this._file != null) {\n\t map.file = this._file;\n\t }\n\t if (this._sourceRoot != null) {\n\t map.sourceRoot = this._sourceRoot;\n\t }\n\t if (this._sourcesContents) {\n\t map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot);\n\t }\n\t\n\t return map;\n\t };\n\t\n\t/**\n\t * Render the source map being generated to a string.\n\t */\n\tSourceMapGenerator.prototype.toString =\n\t function SourceMapGenerator_toString() {\n\t return JSON.stringify(this.toJSON());\n\t };\n\t\n\texports.SourceMapGenerator = SourceMapGenerator;\n\n\n/***/ }),\n/* 2 */\n/***/ (function(module, exports, __webpack_require__) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t *\n\t * Based on the Base 64 VLQ implementation in Closure Compiler:\n\t * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java\n\t *\n\t * Copyright 2011 The Closure Compiler Authors. All rights reserved.\n\t * Redistribution and use in source and binary forms, with or without\n\t * modification, are permitted provided that the following conditions are\n\t * met:\n\t *\n\t * * Redistributions of source code must retain the above copyright\n\t * notice, this list of conditions and the following disclaimer.\n\t * * Redistributions in binary form must reproduce the above\n\t * copyright notice, this list of conditions and the following\n\t * disclaimer in the documentation and/or other materials provided\n\t * with the distribution.\n\t * * Neither the name of Google Inc. nor the names of its\n\t * contributors may be used to endorse or promote products derived\n\t * from this software without specific prior written permission.\n\t *\n\t * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\t * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\t * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\t * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\t * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\t * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n\t * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n\t * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n\t * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\t * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\t * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\t */\n\t\n\tvar base64 = __webpack_require__(3);\n\t\n\t// A single base 64 digit can contain 6 bits of data. For the base 64 variable\n\t// length quantities we use in the source map spec, the first bit is the sign,\n\t// the next four bits are the actual value, and the 6th bit is the\n\t// continuation bit. 
The continuation bit tells us whether there are more\n\t// digits in this value following this digit.\n\t//\n\t// Continuation\n\t// | Sign\n\t// | |\n\t// V V\n\t// 101011\n\t\n\tvar VLQ_BASE_SHIFT = 5;\n\t\n\t// binary: 100000\n\tvar VLQ_BASE = 1 << VLQ_BASE_SHIFT;\n\t\n\t// binary: 011111\n\tvar VLQ_BASE_MASK = VLQ_BASE - 1;\n\t\n\t// binary: 100000\n\tvar VLQ_CONTINUATION_BIT = VLQ_BASE;\n\t\n\t/**\n\t * Converts from a two-complement value to a value where the sign bit is\n\t * placed in the least significant bit. For example, as decimals:\n\t * 1 becomes 2 (10 binary), -1 becomes 3 (11 binary)\n\t * 2 becomes 4 (100 binary), -2 becomes 5 (101 binary)\n\t */\n\tfunction toVLQSigned(aValue) {\n\t return aValue < 0\n\t ? ((-aValue) << 1) + 1\n\t : (aValue << 1) + 0;\n\t}\n\t\n\t/**\n\t * Converts to a two-complement value from a value where the sign bit is\n\t * placed in the least significant bit. For example, as decimals:\n\t * 2 (10 binary) becomes 1, 3 (11 binary) becomes -1\n\t * 4 (100 binary) becomes 2, 5 (101 binary) becomes -2\n\t */\n\tfunction fromVLQSigned(aValue) {\n\t var isNegative = (aValue & 1) === 1;\n\t var shifted = aValue >> 1;\n\t return isNegative\n\t ? -shifted\n\t : shifted;\n\t}\n\t\n\t/**\n\t * Returns the base 64 VLQ encoded value.\n\t */\n\texports.encode = function base64VLQ_encode(aValue) {\n\t var encoded = \"\";\n\t var digit;\n\t\n\t var vlq = toVLQSigned(aValue);\n\t\n\t do {\n\t digit = vlq & VLQ_BASE_MASK;\n\t vlq >>>= VLQ_BASE_SHIFT;\n\t if (vlq > 0) {\n\t // There are still more digits in this value, so we must make sure the\n\t // continuation bit is marked.\n\t digit |= VLQ_CONTINUATION_BIT;\n\t }\n\t encoded += base64.encode(digit);\n\t } while (vlq > 0);\n\t\n\t return encoded;\n\t};\n\t\n\t/**\n\t * Decodes the next base 64 VLQ value from the given string and returns the\n\t * value and the rest of the string via the out parameter.\n\t */\n\texports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) {\n\t var strLen = aStr.length;\n\t var result = 0;\n\t var shift = 0;\n\t var continuation, digit;\n\t\n\t do {\n\t if (aIndex >= strLen) {\n\t throw new Error(\"Expected more digits in base 64 VLQ value.\");\n\t }\n\t\n\t digit = base64.decode(aStr.charCodeAt(aIndex++));\n\t if (digit === -1) {\n\t throw new Error(\"Invalid base64 digit: \" + aStr.charAt(aIndex - 1));\n\t }\n\t\n\t continuation = !!(digit & VLQ_CONTINUATION_BIT);\n\t digit &= VLQ_BASE_MASK;\n\t result = result + (digit << shift);\n\t shift += VLQ_BASE_SHIFT;\n\t } while (continuation);\n\t\n\t aOutParam.value = fromVLQSigned(result);\n\t aOutParam.rest = aIndex;\n\t};\n\n\n/***/ }),\n/* 3 */\n/***/ (function(module, exports) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\tvar intToCharMap = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split('');\n\t\n\t/**\n\t * Encode an integer in the range of 0 to 63 to a single base 64 digit.\n\t */\n\texports.encode = function (number) {\n\t if (0 <= number && number < intToCharMap.length) {\n\t return intToCharMap[number];\n\t }\n\t throw new TypeError(\"Must be between 0 and 63: \" + number);\n\t};\n\t\n\t/**\n\t * Decode a single base 64 character code digit to an integer. 
Returns -1 on\n\t * failure.\n\t */\n\texports.decode = function (charCode) {\n\t var bigA = 65; // 'A'\n\t var bigZ = 90; // 'Z'\n\t\n\t var littleA = 97; // 'a'\n\t var littleZ = 122; // 'z'\n\t\n\t var zero = 48; // '0'\n\t var nine = 57; // '9'\n\t\n\t var plus = 43; // '+'\n\t var slash = 47; // '/'\n\t\n\t var littleOffset = 26;\n\t var numberOffset = 52;\n\t\n\t // 0 - 25: ABCDEFGHIJKLMNOPQRSTUVWXYZ\n\t if (bigA <= charCode && charCode <= bigZ) {\n\t return (charCode - bigA);\n\t }\n\t\n\t // 26 - 51: abcdefghijklmnopqrstuvwxyz\n\t if (littleA <= charCode && charCode <= littleZ) {\n\t return (charCode - littleA + littleOffset);\n\t }\n\t\n\t // 52 - 61: 0123456789\n\t if (zero <= charCode && charCode <= nine) {\n\t return (charCode - zero + numberOffset);\n\t }\n\t\n\t // 62: +\n\t if (charCode == plus) {\n\t return 62;\n\t }\n\t\n\t // 63: /\n\t if (charCode == slash) {\n\t return 63;\n\t }\n\t\n\t // Invalid base64 digit.\n\t return -1;\n\t};\n\n\n/***/ }),\n/* 4 */\n/***/ (function(module, exports) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\t/**\n\t * This is a helper function for getting values from parameter/options\n\t * objects.\n\t *\n\t * @param args The object we are extracting values from\n\t * @param name The name of the property we are getting.\n\t * @param defaultValue An optional value to return if the property is missing\n\t * from the object. If this is not specified and the property is missing, an\n\t * error will be thrown.\n\t */\n\tfunction getArg(aArgs, aName, aDefaultValue) {\n\t if (aName in aArgs) {\n\t return aArgs[aName];\n\t } else if (arguments.length === 3) {\n\t return aDefaultValue;\n\t } else {\n\t throw new Error('\"' + aName + '\" is a required argument.');\n\t }\n\t}\n\texports.getArg = getArg;\n\t\n\tvar urlRegexp = /^(?:([\\w+\\-.]+):)?\\/\\/(?:(\\w+:\\w+)@)?([\\w.-]*)(?::(\\d+))?(.*)$/;\n\tvar dataUrlRegexp = /^data:.+\\,.+$/;\n\t\n\tfunction urlParse(aUrl) {\n\t var match = aUrl.match(urlRegexp);\n\t if (!match) {\n\t return null;\n\t }\n\t return {\n\t scheme: match[1],\n\t auth: match[2],\n\t host: match[3],\n\t port: match[4],\n\t path: match[5]\n\t };\n\t}\n\texports.urlParse = urlParse;\n\t\n\tfunction urlGenerate(aParsedUrl) {\n\t var url = '';\n\t if (aParsedUrl.scheme) {\n\t url += aParsedUrl.scheme + ':';\n\t }\n\t url += '//';\n\t if (aParsedUrl.auth) {\n\t url += aParsedUrl.auth + '@';\n\t }\n\t if (aParsedUrl.host) {\n\t url += aParsedUrl.host;\n\t }\n\t if (aParsedUrl.port) {\n\t url += \":\" + aParsedUrl.port\n\t }\n\t if (aParsedUrl.path) {\n\t url += aParsedUrl.path;\n\t }\n\t return url;\n\t}\n\texports.urlGenerate = urlGenerate;\n\t\n\t/**\n\t * Normalizes a path, or the path portion of a URL:\n\t *\n\t * - Replaces consecutive slashes with one slash.\n\t * - Removes unnecessary '.' parts.\n\t * - Removes unnecessary '/..' 
parts.\n\t *\n\t * Based on code in the Node.js 'path' core module.\n\t *\n\t * @param aPath The path or url to normalize.\n\t */\n\tfunction normalize(aPath) {\n\t var path = aPath;\n\t var url = urlParse(aPath);\n\t if (url) {\n\t if (!url.path) {\n\t return aPath;\n\t }\n\t path = url.path;\n\t }\n\t var isAbsolute = exports.isAbsolute(path);\n\t\n\t var parts = path.split(/\\/+/);\n\t for (var part, up = 0, i = parts.length - 1; i >= 0; i--) {\n\t part = parts[i];\n\t if (part === '.') {\n\t parts.splice(i, 1);\n\t } else if (part === '..') {\n\t up++;\n\t } else if (up > 0) {\n\t if (part === '') {\n\t // The first part is blank if the path is absolute. Trying to go\n\t // above the root is a no-op. Therefore we can remove all '..' parts\n\t // directly after the root.\n\t parts.splice(i + 1, up);\n\t up = 0;\n\t } else {\n\t parts.splice(i, 2);\n\t up--;\n\t }\n\t }\n\t }\n\t path = parts.join('/');\n\t\n\t if (path === '') {\n\t path = isAbsolute ? '/' : '.';\n\t }\n\t\n\t if (url) {\n\t url.path = path;\n\t return urlGenerate(url);\n\t }\n\t return path;\n\t}\n\texports.normalize = normalize;\n\t\n\t/**\n\t * Joins two paths/URLs.\n\t *\n\t * @param aRoot The root path or URL.\n\t * @param aPath The path or URL to be joined with the root.\n\t *\n\t * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a\n\t * scheme-relative URL: Then the scheme of aRoot, if any, is prepended\n\t * first.\n\t * - Otherwise aPath is a path. If aRoot is a URL, then its path portion\n\t * is updated with the result and aRoot is returned. Otherwise the result\n\t * is returned.\n\t * - If aPath is absolute, the result is aPath.\n\t * - Otherwise the two paths are joined with a slash.\n\t * - Joining for example 'http://' and 'www.example.com' is also supported.\n\t */\n\tfunction join(aRoot, aPath) {\n\t if (aRoot === \"\") {\n\t aRoot = \".\";\n\t }\n\t if (aPath === \"\") {\n\t aPath = \".\";\n\t }\n\t var aPathUrl = urlParse(aPath);\n\t var aRootUrl = urlParse(aRoot);\n\t if (aRootUrl) {\n\t aRoot = aRootUrl.path || '/';\n\t }\n\t\n\t // `join(foo, '//www.example.org')`\n\t if (aPathUrl && !aPathUrl.scheme) {\n\t if (aRootUrl) {\n\t aPathUrl.scheme = aRootUrl.scheme;\n\t }\n\t return urlGenerate(aPathUrl);\n\t }\n\t\n\t if (aPathUrl || aPath.match(dataUrlRegexp)) {\n\t return aPath;\n\t }\n\t\n\t // `join('http://', 'www.example.com')`\n\t if (aRootUrl && !aRootUrl.host && !aRootUrl.path) {\n\t aRootUrl.host = aPath;\n\t return urlGenerate(aRootUrl);\n\t }\n\t\n\t var joined = aPath.charAt(0) === '/'\n\t ? aPath\n\t : normalize(aRoot.replace(/\\/+$/, '') + '/' + aPath);\n\t\n\t if (aRootUrl) {\n\t aRootUrl.path = joined;\n\t return urlGenerate(aRootUrl);\n\t }\n\t return joined;\n\t}\n\texports.join = join;\n\t\n\texports.isAbsolute = function (aPath) {\n\t return aPath.charAt(0) === '/' || urlRegexp.test(aPath);\n\t};\n\t\n\t/**\n\t * Make a path relative to a URL or another path.\n\t *\n\t * @param aRoot The root path or URL.\n\t * @param aPath The path or URL to be made relative to aRoot.\n\t */\n\tfunction relative(aRoot, aPath) {\n\t if (aRoot === \"\") {\n\t aRoot = \".\";\n\t }\n\t\n\t aRoot = aRoot.replace(/\\/$/, '');\n\t\n\t // It is possible for the path to be above the root. In this case, simply\n\t // checking whether the root is a prefix of the path won't work. 
Instead, we\n\t // need to remove components from the root one by one, until either we find\n\t // a prefix that fits, or we run out of components to remove.\n\t var level = 0;\n\t while (aPath.indexOf(aRoot + '/') !== 0) {\n\t var index = aRoot.lastIndexOf(\"/\");\n\t if (index < 0) {\n\t return aPath;\n\t }\n\t\n\t // If the only part of the root that is left is the scheme (i.e. http://,\n\t // file:///, etc.), one or more slashes (/), or simply nothing at all, we\n\t // have exhausted all components, so the path is not relative to the root.\n\t aRoot = aRoot.slice(0, index);\n\t if (aRoot.match(/^([^\\/]+:\\/)?\\/*$/)) {\n\t return aPath;\n\t }\n\t\n\t ++level;\n\t }\n\t\n\t // Make sure we add a \"../\" for each component we removed from the root.\n\t return Array(level + 1).join(\"../\") + aPath.substr(aRoot.length + 1);\n\t}\n\texports.relative = relative;\n\t\n\tvar supportsNullProto = (function () {\n\t var obj = Object.create(null);\n\t return !('__proto__' in obj);\n\t}());\n\t\n\tfunction identity (s) {\n\t return s;\n\t}\n\t\n\t/**\n\t * Because behavior goes wacky when you set `__proto__` on objects, we\n\t * have to prefix all the strings in our set with an arbitrary character.\n\t *\n\t * See https://github.com/mozilla/source-map/pull/31 and\n\t * https://github.com/mozilla/source-map/issues/30\n\t *\n\t * @param String aStr\n\t */\n\tfunction toSetString(aStr) {\n\t if (isProtoString(aStr)) {\n\t return '$' + aStr;\n\t }\n\t\n\t return aStr;\n\t}\n\texports.toSetString = supportsNullProto ? identity : toSetString;\n\t\n\tfunction fromSetString(aStr) {\n\t if (isProtoString(aStr)) {\n\t return aStr.slice(1);\n\t }\n\t\n\t return aStr;\n\t}\n\texports.fromSetString = supportsNullProto ? identity : fromSetString;\n\t\n\tfunction isProtoString(s) {\n\t if (!s) {\n\t return false;\n\t }\n\t\n\t var length = s.length;\n\t\n\t if (length < 9 /* \"__proto__\".length */) {\n\t return false;\n\t }\n\t\n\t if (s.charCodeAt(length - 1) !== 95 /* '_' */ ||\n\t s.charCodeAt(length - 2) !== 95 /* '_' */ ||\n\t s.charCodeAt(length - 3) !== 111 /* 'o' */ ||\n\t s.charCodeAt(length - 4) !== 116 /* 't' */ ||\n\t s.charCodeAt(length - 5) !== 111 /* 'o' */ ||\n\t s.charCodeAt(length - 6) !== 114 /* 'r' */ ||\n\t s.charCodeAt(length - 7) !== 112 /* 'p' */ ||\n\t s.charCodeAt(length - 8) !== 95 /* '_' */ ||\n\t s.charCodeAt(length - 9) !== 95 /* '_' */) {\n\t return false;\n\t }\n\t\n\t for (var i = length - 10; i >= 0; i--) {\n\t if (s.charCodeAt(i) !== 36 /* '$' */) {\n\t return false;\n\t }\n\t }\n\t\n\t return true;\n\t}\n\t\n\t/**\n\t * Comparator between two mappings where the original positions are compared.\n\t *\n\t * Optionally pass in `true` as `onlyCompareGenerated` to consider two\n\t * mappings with the same original source/line/column, but different generated\n\t * line and column the same. 
Useful when searching for a mapping with a\n\t * stubbed out mapping.\n\t */\n\tfunction compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) {\n\t var cmp = strcmp(mappingA.source, mappingB.source);\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.originalLine - mappingB.originalLine;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.originalColumn - mappingB.originalColumn;\n\t if (cmp !== 0 || onlyCompareOriginal) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.generatedColumn - mappingB.generatedColumn;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.generatedLine - mappingB.generatedLine;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t return strcmp(mappingA.name, mappingB.name);\n\t}\n\texports.compareByOriginalPositions = compareByOriginalPositions;\n\t\n\t/**\n\t * Comparator between two mappings with deflated source and name indices where\n\t * the generated positions are compared.\n\t *\n\t * Optionally pass in `true` as `onlyCompareGenerated` to consider two\n\t * mappings with the same generated line and column, but different\n\t * source/name/original line and column the same. Useful when searching for a\n\t * mapping with a stubbed out mapping.\n\t */\n\tfunction compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) {\n\t var cmp = mappingA.generatedLine - mappingB.generatedLine;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.generatedColumn - mappingB.generatedColumn;\n\t if (cmp !== 0 || onlyCompareGenerated) {\n\t return cmp;\n\t }\n\t\n\t cmp = strcmp(mappingA.source, mappingB.source);\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.originalLine - mappingB.originalLine;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.originalColumn - mappingB.originalColumn;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t return strcmp(mappingA.name, mappingB.name);\n\t}\n\texports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated;\n\t\n\tfunction strcmp(aStr1, aStr2) {\n\t if (aStr1 === aStr2) {\n\t return 0;\n\t }\n\t\n\t if (aStr1 === null) {\n\t return 1; // aStr2 !== null\n\t }\n\t\n\t if (aStr2 === null) {\n\t return -1; // aStr1 !== null\n\t }\n\t\n\t if (aStr1 > aStr2) {\n\t return 1;\n\t }\n\t\n\t return -1;\n\t}\n\t\n\t/**\n\t * Comparator between two mappings with inflated source and name strings where\n\t * the generated positions are compared.\n\t */\n\tfunction compareByGeneratedPositionsInflated(mappingA, mappingB) {\n\t var cmp = mappingA.generatedLine - mappingB.generatedLine;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.generatedColumn - mappingB.generatedColumn;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = strcmp(mappingA.source, mappingB.source);\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.originalLine - mappingB.originalLine;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t cmp = mappingA.originalColumn - mappingB.originalColumn;\n\t if (cmp !== 0) {\n\t return cmp;\n\t }\n\t\n\t return strcmp(mappingA.name, mappingB.name);\n\t}\n\texports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated;\n\t\n\t/**\n\t * Strip any JSON XSSI avoidance prefix from the string (as documented\n\t * in the source maps specification), and then parse the string as\n\t * JSON.\n\t */\n\tfunction parseSourceMapInput(str) {\n\t return JSON.parse(str.replace(/^\\)]}'[^\\n]*\\n/, ''));\n\t}\n\texports.parseSourceMapInput = 
parseSourceMapInput;\n\t\n\t/**\n\t * Compute the URL of a source given the the source root, the source's\n\t * URL, and the source map's URL.\n\t */\n\tfunction computeSourceURL(sourceRoot, sourceURL, sourceMapURL) {\n\t sourceURL = sourceURL || '';\n\t\n\t if (sourceRoot) {\n\t // This follows what Chrome does.\n\t if (sourceRoot[sourceRoot.length - 1] !== '/' && sourceURL[0] !== '/') {\n\t sourceRoot += '/';\n\t }\n\t // The spec says:\n\t // Line 4: An optional source root, useful for relocating source\n\t // files on a server or removing repeated values in the\n\t // “sources” entry. This value is prepended to the individual\n\t // entries in the “source” field.\n\t sourceURL = sourceRoot + sourceURL;\n\t }\n\t\n\t // Historically, SourceMapConsumer did not take the sourceMapURL as\n\t // a parameter. This mode is still somewhat supported, which is why\n\t // this code block is conditional. However, it's preferable to pass\n\t // the source map URL to SourceMapConsumer, so that this function\n\t // can implement the source URL resolution algorithm as outlined in\n\t // the spec. This block is basically the equivalent of:\n\t // new URL(sourceURL, sourceMapURL).toString()\n\t // ... except it avoids using URL, which wasn't available in the\n\t // older releases of node still supported by this library.\n\t //\n\t // The spec says:\n\t // If the sources are not absolute URLs after prepending of the\n\t // “sourceRoot”, the sources are resolved relative to the\n\t // SourceMap (like resolving script src in a html document).\n\t if (sourceMapURL) {\n\t var parsed = urlParse(sourceMapURL);\n\t if (!parsed) {\n\t throw new Error(\"sourceMapURL could not be parsed\");\n\t }\n\t if (parsed.path) {\n\t // Strip the last path component, but keep the \"/\".\n\t var index = parsed.path.lastIndexOf('/');\n\t if (index >= 0) {\n\t parsed.path = parsed.path.substring(0, index + 1);\n\t }\n\t }\n\t sourceURL = join(urlGenerate(parsed), sourceURL);\n\t }\n\t\n\t return normalize(sourceURL);\n\t}\n\texports.computeSourceURL = computeSourceURL;\n\n\n/***/ }),\n/* 5 */\n/***/ (function(module, exports, __webpack_require__) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\tvar util = __webpack_require__(4);\n\tvar has = Object.prototype.hasOwnProperty;\n\tvar hasNativeMap = typeof Map !== \"undefined\";\n\t\n\t/**\n\t * A data structure which is a combination of an array and a set. Adding a new\n\t * member is O(1), testing for membership is O(1), and finding the index of an\n\t * element is O(1). Removing elements from the set is not supported. Only\n\t * strings are supported for membership.\n\t */\n\tfunction ArraySet() {\n\t this._array = [];\n\t this._set = hasNativeMap ? new Map() : Object.create(null);\n\t}\n\t\n\t/**\n\t * Static method for creating ArraySet instances from an existing array.\n\t */\n\tArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) {\n\t var set = new ArraySet();\n\t for (var i = 0, len = aArray.length; i < len; i++) {\n\t set.add(aArray[i], aAllowDuplicates);\n\t }\n\t return set;\n\t};\n\t\n\t/**\n\t * Return how many unique items are in this ArraySet. If duplicates have been\n\t * added, than those do not count towards the size.\n\t *\n\t * @returns Number\n\t */\n\tArraySet.prototype.size = function ArraySet_size() {\n\t return hasNativeMap ? 
this._set.size : Object.getOwnPropertyNames(this._set).length;\n\t};\n\t\n\t/**\n\t * Add the given string to this set.\n\t *\n\t * @param String aStr\n\t */\n\tArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) {\n\t var sStr = hasNativeMap ? aStr : util.toSetString(aStr);\n\t var isDuplicate = hasNativeMap ? this.has(aStr) : has.call(this._set, sStr);\n\t var idx = this._array.length;\n\t if (!isDuplicate || aAllowDuplicates) {\n\t this._array.push(aStr);\n\t }\n\t if (!isDuplicate) {\n\t if (hasNativeMap) {\n\t this._set.set(aStr, idx);\n\t } else {\n\t this._set[sStr] = idx;\n\t }\n\t }\n\t};\n\t\n\t/**\n\t * Is the given string a member of this set?\n\t *\n\t * @param String aStr\n\t */\n\tArraySet.prototype.has = function ArraySet_has(aStr) {\n\t if (hasNativeMap) {\n\t return this._set.has(aStr);\n\t } else {\n\t var sStr = util.toSetString(aStr);\n\t return has.call(this._set, sStr);\n\t }\n\t};\n\t\n\t/**\n\t * What is the index of the given string in the array?\n\t *\n\t * @param String aStr\n\t */\n\tArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) {\n\t if (hasNativeMap) {\n\t var idx = this._set.get(aStr);\n\t if (idx >= 0) {\n\t return idx;\n\t }\n\t } else {\n\t var sStr = util.toSetString(aStr);\n\t if (has.call(this._set, sStr)) {\n\t return this._set[sStr];\n\t }\n\t }\n\t\n\t throw new Error('\"' + aStr + '\" is not in the set.');\n\t};\n\t\n\t/**\n\t * What is the element at the given index?\n\t *\n\t * @param Number aIdx\n\t */\n\tArraySet.prototype.at = function ArraySet_at(aIdx) {\n\t if (aIdx >= 0 && aIdx < this._array.length) {\n\t return this._array[aIdx];\n\t }\n\t throw new Error('No element indexed by ' + aIdx);\n\t};\n\t\n\t/**\n\t * Returns the array representation of this set (which has the proper indices\n\t * indicated by indexOf). Note that this is a copy of the internal array used\n\t * for storing the members so that no one can mess with internal state.\n\t */\n\tArraySet.prototype.toArray = function ArraySet_toArray() {\n\t return this._array.slice();\n\t};\n\t\n\texports.ArraySet = ArraySet;\n\n\n/***/ }),\n/* 6 */\n/***/ (function(module, exports, __webpack_require__) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2014 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\tvar util = __webpack_require__(4);\n\t\n\t/**\n\t * Determine whether mappingB is after mappingA with respect to generated\n\t * position.\n\t */\n\tfunction generatedPositionAfter(mappingA, mappingB) {\n\t // Optimized for most common case\n\t var lineA = mappingA.generatedLine;\n\t var lineB = mappingB.generatedLine;\n\t var columnA = mappingA.generatedColumn;\n\t var columnB = mappingB.generatedColumn;\n\t return lineB > lineA || lineB == lineA && columnB >= columnA ||\n\t util.compareByGeneratedPositionsInflated(mappingA, mappingB) <= 0;\n\t}\n\t\n\t/**\n\t * A data structure to provide a sorted view of accumulated mappings in a\n\t * performance conscious manner. It trades a neglibable overhead in general\n\t * case for a large speedup in case of mappings being added in order.\n\t */\n\tfunction MappingList() {\n\t this._array = [];\n\t this._sorted = true;\n\t // Serves as infimum\n\t this._last = {generatedLine: -1, generatedColumn: 0};\n\t}\n\t\n\t/**\n\t * Iterate through internal items. 
This method takes the same arguments that\n\t * `Array.prototype.forEach` takes.\n\t *\n\t * NOTE: The order of the mappings is NOT guaranteed.\n\t */\n\tMappingList.prototype.unsortedForEach =\n\t function MappingList_forEach(aCallback, aThisArg) {\n\t this._array.forEach(aCallback, aThisArg);\n\t };\n\t\n\t/**\n\t * Add the given source mapping.\n\t *\n\t * @param Object aMapping\n\t */\n\tMappingList.prototype.add = function MappingList_add(aMapping) {\n\t if (generatedPositionAfter(this._last, aMapping)) {\n\t this._last = aMapping;\n\t this._array.push(aMapping);\n\t } else {\n\t this._sorted = false;\n\t this._array.push(aMapping);\n\t }\n\t};\n\t\n\t/**\n\t * Returns the flat, sorted array of mappings. The mappings are sorted by\n\t * generated position.\n\t *\n\t * WARNING: This method returns internal data without copying, for\n\t * performance. The return value must NOT be mutated, and should be treated as\n\t * an immutable borrow. If you want to take ownership, you must make your own\n\t * copy.\n\t */\n\tMappingList.prototype.toArray = function MappingList_toArray() {\n\t if (!this._sorted) {\n\t this._array.sort(util.compareByGeneratedPositionsInflated);\n\t this._sorted = true;\n\t }\n\t return this._array;\n\t};\n\t\n\texports.MappingList = MappingList;\n\n\n/***/ }),\n/* 7 */\n/***/ (function(module, exports, __webpack_require__) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\tvar util = __webpack_require__(4);\n\tvar binarySearch = __webpack_require__(8);\n\tvar ArraySet = __webpack_require__(5).ArraySet;\n\tvar base64VLQ = __webpack_require__(2);\n\tvar quickSort = __webpack_require__(9).quickSort;\n\t\n\tfunction SourceMapConsumer(aSourceMap, aSourceMapURL) {\n\t var sourceMap = aSourceMap;\n\t if (typeof aSourceMap === 'string') {\n\t sourceMap = util.parseSourceMapInput(aSourceMap);\n\t }\n\t\n\t return sourceMap.sections != null\n\t ? new IndexedSourceMapConsumer(sourceMap, aSourceMapURL)\n\t : new BasicSourceMapConsumer(sourceMap, aSourceMapURL);\n\t}\n\t\n\tSourceMapConsumer.fromSourceMap = function(aSourceMap, aSourceMapURL) {\n\t return BasicSourceMapConsumer.fromSourceMap(aSourceMap, aSourceMapURL);\n\t}\n\t\n\t/**\n\t * The version of the source mapping spec that we are consuming.\n\t */\n\tSourceMapConsumer.prototype._version = 3;\n\t\n\t// `__generatedMappings` and `__originalMappings` are arrays that hold the\n\t// parsed mapping coordinates from the source map's \"mappings\" attribute. They\n\t// are lazily instantiated, accessed via the `_generatedMappings` and\n\t// `_originalMappings` getters respectively, and we only parse the mappings\n\t// and create these arrays once queried for a source location. 
We jump through\n\t// these hoops because there can be many thousands of mappings, and parsing\n\t// them is expensive, so we only want to do it if we must.\n\t//\n\t// Each object in the arrays is of the form:\n\t//\n\t// {\n\t// generatedLine: The line number in the generated code,\n\t// generatedColumn: The column number in the generated code,\n\t// source: The path to the original source file that generated this\n\t// chunk of code,\n\t// originalLine: The line number in the original source that\n\t// corresponds to this chunk of generated code,\n\t// originalColumn: The column number in the original source that\n\t// corresponds to this chunk of generated code,\n\t// name: The name of the original symbol which generated this chunk of\n\t// code.\n\t// }\n\t//\n\t// All properties except for `generatedLine` and `generatedColumn` can be\n\t// `null`.\n\t//\n\t// `_generatedMappings` is ordered by the generated positions.\n\t//\n\t// `_originalMappings` is ordered by the original positions.\n\t\n\tSourceMapConsumer.prototype.__generatedMappings = null;\n\tObject.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', {\n\t configurable: true,\n\t enumerable: true,\n\t get: function () {\n\t if (!this.__generatedMappings) {\n\t this._parseMappings(this._mappings, this.sourceRoot);\n\t }\n\t\n\t return this.__generatedMappings;\n\t }\n\t});\n\t\n\tSourceMapConsumer.prototype.__originalMappings = null;\n\tObject.defineProperty(SourceMapConsumer.prototype, '_originalMappings', {\n\t configurable: true,\n\t enumerable: true,\n\t get: function () {\n\t if (!this.__originalMappings) {\n\t this._parseMappings(this._mappings, this.sourceRoot);\n\t }\n\t\n\t return this.__originalMappings;\n\t }\n\t});\n\t\n\tSourceMapConsumer.prototype._charIsMappingSeparator =\n\t function SourceMapConsumer_charIsMappingSeparator(aStr, index) {\n\t var c = aStr.charAt(index);\n\t return c === \";\" || c === \",\";\n\t };\n\t\n\t/**\n\t * Parse the mappings in a string in to a data structure which we can easily\n\t * query (the ordered arrays in the `this.__generatedMappings` and\n\t * `this.__originalMappings` properties).\n\t */\n\tSourceMapConsumer.prototype._parseMappings =\n\t function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {\n\t throw new Error(\"Subclasses must implement _parseMappings\");\n\t };\n\t\n\tSourceMapConsumer.GENERATED_ORDER = 1;\n\tSourceMapConsumer.ORIGINAL_ORDER = 2;\n\t\n\tSourceMapConsumer.GREATEST_LOWER_BOUND = 1;\n\tSourceMapConsumer.LEAST_UPPER_BOUND = 2;\n\t\n\t/**\n\t * Iterate over each mapping between an original source/line/column and a\n\t * generated line/column in this source map.\n\t *\n\t * @param Function aCallback\n\t * The function that is called with each mapping.\n\t * @param Object aContext\n\t * Optional. If specified, this object will be the value of `this` every\n\t * time that `aCallback` is called.\n\t * @param aOrder\n\t * Either `SourceMapConsumer.GENERATED_ORDER` or\n\t * `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to\n\t * iterate over the mappings sorted by the generated file's line/column\n\t * order or the original's source/line/column order, respectively. 
Defaults to\n\t * `SourceMapConsumer.GENERATED_ORDER`.\n\t */\n\tSourceMapConsumer.prototype.eachMapping =\n\t function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) {\n\t var context = aContext || null;\n\t var order = aOrder || SourceMapConsumer.GENERATED_ORDER;\n\t\n\t var mappings;\n\t switch (order) {\n\t case SourceMapConsumer.GENERATED_ORDER:\n\t mappings = this._generatedMappings;\n\t break;\n\t case SourceMapConsumer.ORIGINAL_ORDER:\n\t mappings = this._originalMappings;\n\t break;\n\t default:\n\t throw new Error(\"Unknown order of iteration.\");\n\t }\n\t\n\t var sourceRoot = this.sourceRoot;\n\t mappings.map(function (mapping) {\n\t var source = mapping.source === null ? null : this._sources.at(mapping.source);\n\t source = util.computeSourceURL(sourceRoot, source, this._sourceMapURL);\n\t return {\n\t source: source,\n\t generatedLine: mapping.generatedLine,\n\t generatedColumn: mapping.generatedColumn,\n\t originalLine: mapping.originalLine,\n\t originalColumn: mapping.originalColumn,\n\t name: mapping.name === null ? null : this._names.at(mapping.name)\n\t };\n\t }, this).forEach(aCallback, context);\n\t };\n\t\n\t/**\n\t * Returns all generated line and column information for the original source,\n\t * line, and column provided. If no column is provided, returns all mappings\n\t * corresponding to a either the line we are searching for or the next\n\t * closest line that has any mappings. Otherwise, returns all mappings\n\t * corresponding to the given line and either the column we are searching for\n\t * or the next closest column that has any offsets.\n\t *\n\t * The only argument is an object with the following properties:\n\t *\n\t * - source: The filename of the original source.\n\t * - line: The line number in the original source. The line number is 1-based.\n\t * - column: Optional. the column number in the original source.\n\t * The column number is 0-based.\n\t *\n\t * and an array of objects is returned, each with the following properties:\n\t *\n\t * - line: The line number in the generated source, or null. The\n\t * line number is 1-based.\n\t * - column: The column number in the generated source, or null.\n\t * The column number is 0-based.\n\t */\n\tSourceMapConsumer.prototype.allGeneratedPositionsFor =\n\t function SourceMapConsumer_allGeneratedPositionsFor(aArgs) {\n\t var line = util.getArg(aArgs, 'line');\n\t\n\t // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping\n\t // returns the index of the closest mapping less than the needle. By\n\t // setting needle.originalColumn to 0, we thus find the last mapping for\n\t // the given line, provided such a mapping exists.\n\t var needle = {\n\t source: util.getArg(aArgs, 'source'),\n\t originalLine: line,\n\t originalColumn: util.getArg(aArgs, 'column', 0)\n\t };\n\t\n\t needle.source = this._findSourceIndex(needle.source);\n\t if (needle.source < 0) {\n\t return [];\n\t }\n\t\n\t var mappings = [];\n\t\n\t var index = this._findMapping(needle,\n\t this._originalMappings,\n\t \"originalLine\",\n\t \"originalColumn\",\n\t util.compareByOriginalPositions,\n\t binarySearch.LEAST_UPPER_BOUND);\n\t if (index >= 0) {\n\t var mapping = this._originalMappings[index];\n\t\n\t if (aArgs.column === undefined) {\n\t var originalLine = mapping.originalLine;\n\t\n\t // Iterate until either we run out of mappings, or we run into\n\t // a mapping for a different line than the one we found. 
Since\n\t // mappings are sorted, this is guaranteed to find all mappings for\n\t // the line we found.\n\t while (mapping && mapping.originalLine === originalLine) {\n\t mappings.push({\n\t line: util.getArg(mapping, 'generatedLine', null),\n\t column: util.getArg(mapping, 'generatedColumn', null),\n\t lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)\n\t });\n\t\n\t mapping = this._originalMappings[++index];\n\t }\n\t } else {\n\t var originalColumn = mapping.originalColumn;\n\t\n\t // Iterate until either we run out of mappings, or we run into\n\t // a mapping for a different line than the one we were searching for.\n\t // Since mappings are sorted, this is guaranteed to find all mappings for\n\t // the line we are searching for.\n\t while (mapping &&\n\t mapping.originalLine === line &&\n\t mapping.originalColumn == originalColumn) {\n\t mappings.push({\n\t line: util.getArg(mapping, 'generatedLine', null),\n\t column: util.getArg(mapping, 'generatedColumn', null),\n\t lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)\n\t });\n\t\n\t mapping = this._originalMappings[++index];\n\t }\n\t }\n\t }\n\t\n\t return mappings;\n\t };\n\t\n\texports.SourceMapConsumer = SourceMapConsumer;\n\t\n\t/**\n\t * A BasicSourceMapConsumer instance represents a parsed source map which we can\n\t * query for information about the original file positions by giving it a file\n\t * position in the generated source.\n\t *\n\t * The first parameter is the raw source map (either as a JSON string, or\n\t * already parsed to an object). According to the spec, source maps have the\n\t * following attributes:\n\t *\n\t * - version: Which version of the source map spec this map is following.\n\t * - sources: An array of URLs to the original source files.\n\t * - names: An array of identifiers which can be referrenced by individual mappings.\n\t * - sourceRoot: Optional. The URL root from which all sources are relative.\n\t * - sourcesContent: Optional. An array of contents of the original source files.\n\t * - mappings: A string of base64 VLQs which contain the actual mappings.\n\t * - file: Optional. The generated file this source map is associated with.\n\t *\n\t * Here is an example source map, taken from the source map spec[0]:\n\t *\n\t * {\n\t * version : 3,\n\t * file: \"out.js\",\n\t * sourceRoot : \"\",\n\t * sources: [\"foo.js\", \"bar.js\"],\n\t * names: [\"src\", \"maps\", \"are\", \"fun\"],\n\t * mappings: \"AA,AB;;ABCDE;\"\n\t * }\n\t *\n\t * The second parameter, if given, is a string whose value is the URL\n\t * at which the source map was found. 
This URL is used to compute the\n\t * sources array.\n\t *\n\t * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1#\n\t */\n\tfunction BasicSourceMapConsumer(aSourceMap, aSourceMapURL) {\n\t var sourceMap = aSourceMap;\n\t if (typeof aSourceMap === 'string') {\n\t sourceMap = util.parseSourceMapInput(aSourceMap);\n\t }\n\t\n\t var version = util.getArg(sourceMap, 'version');\n\t var sources = util.getArg(sourceMap, 'sources');\n\t // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which\n\t // requires the array) to play nice here.\n\t var names = util.getArg(sourceMap, 'names', []);\n\t var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null);\n\t var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null);\n\t var mappings = util.getArg(sourceMap, 'mappings');\n\t var file = util.getArg(sourceMap, 'file', null);\n\t\n\t // Once again, Sass deviates from the spec and supplies the version as a\n\t // string rather than a number, so we use loose equality checking here.\n\t if (version != this._version) {\n\t throw new Error('Unsupported version: ' + version);\n\t }\n\t\n\t if (sourceRoot) {\n\t sourceRoot = util.normalize(sourceRoot);\n\t }\n\t\n\t sources = sources\n\t .map(String)\n\t // Some source maps produce relative source paths like \"./foo.js\" instead of\n\t // \"foo.js\". Normalize these first so that future comparisons will succeed.\n\t // See bugzil.la/1090768.\n\t .map(util.normalize)\n\t // Always ensure that absolute sources are internally stored relative to\n\t // the source root, if the source root is absolute. Not doing this would\n\t // be particularly problematic when the source root is a prefix of the\n\t // source (valid, but why??). See github issue #199 and bugzil.la/1188982.\n\t .map(function (source) {\n\t return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source)\n\t ? util.relative(sourceRoot, source)\n\t : source;\n\t });\n\t\n\t // Pass `true` below to allow duplicate names and sources. While source maps\n\t // are intended to be compressed and deduplicated, the TypeScript compiler\n\t // sometimes generates source maps with duplicates in them. See Github issue\n\t // #72 and bugzil.la/889492.\n\t this._names = ArraySet.fromArray(names.map(String), true);\n\t this._sources = ArraySet.fromArray(sources, true);\n\t\n\t this._absoluteSources = this._sources.toArray().map(function (s) {\n\t return util.computeSourceURL(sourceRoot, s, aSourceMapURL);\n\t });\n\t\n\t this.sourceRoot = sourceRoot;\n\t this.sourcesContent = sourcesContent;\n\t this._mappings = mappings;\n\t this._sourceMapURL = aSourceMapURL;\n\t this.file = file;\n\t}\n\t\n\tBasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);\n\tBasicSourceMapConsumer.prototype.consumer = SourceMapConsumer;\n\t\n\t/**\n\t * Utility function to find the index of a source. Returns -1 if not\n\t * found.\n\t */\n\tBasicSourceMapConsumer.prototype._findSourceIndex = function(aSource) {\n\t var relativeSource = aSource;\n\t if (this.sourceRoot != null) {\n\t relativeSource = util.relative(this.sourceRoot, relativeSource);\n\t }\n\t\n\t if (this._sources.has(relativeSource)) {\n\t return this._sources.indexOf(relativeSource);\n\t }\n\t\n\t // Maybe aSource is an absolute URL as returned by |sources|. 
In\n\t // this case we can't simply undo the transform.\n\t var i;\n\t for (i = 0; i < this._absoluteSources.length; ++i) {\n\t if (this._absoluteSources[i] == aSource) {\n\t return i;\n\t }\n\t }\n\t\n\t return -1;\n\t};\n\t\n\t/**\n\t * Create a BasicSourceMapConsumer from a SourceMapGenerator.\n\t *\n\t * @param SourceMapGenerator aSourceMap\n\t * The source map that will be consumed.\n\t * @param String aSourceMapURL\n\t * The URL at which the source map can be found (optional)\n\t * @returns BasicSourceMapConsumer\n\t */\n\tBasicSourceMapConsumer.fromSourceMap =\n\t function SourceMapConsumer_fromSourceMap(aSourceMap, aSourceMapURL) {\n\t var smc = Object.create(BasicSourceMapConsumer.prototype);\n\t\n\t var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true);\n\t var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true);\n\t smc.sourceRoot = aSourceMap._sourceRoot;\n\t smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(),\n\t smc.sourceRoot);\n\t smc.file = aSourceMap._file;\n\t smc._sourceMapURL = aSourceMapURL;\n\t smc._absoluteSources = smc._sources.toArray().map(function (s) {\n\t return util.computeSourceURL(smc.sourceRoot, s, aSourceMapURL);\n\t });\n\t\n\t // Because we are modifying the entries (by converting string sources and\n\t // names to indices into the sources and names ArraySets), we have to make\n\t // a copy of the entry or else bad things happen. Shared mutable state\n\t // strikes again! See github issue #191.\n\t\n\t var generatedMappings = aSourceMap._mappings.toArray().slice();\n\t var destGeneratedMappings = smc.__generatedMappings = [];\n\t var destOriginalMappings = smc.__originalMappings = [];\n\t\n\t for (var i = 0, length = generatedMappings.length; i < length; i++) {\n\t var srcMapping = generatedMappings[i];\n\t var destMapping = new Mapping;\n\t destMapping.generatedLine = srcMapping.generatedLine;\n\t destMapping.generatedColumn = srcMapping.generatedColumn;\n\t\n\t if (srcMapping.source) {\n\t destMapping.source = sources.indexOf(srcMapping.source);\n\t destMapping.originalLine = srcMapping.originalLine;\n\t destMapping.originalColumn = srcMapping.originalColumn;\n\t\n\t if (srcMapping.name) {\n\t destMapping.name = names.indexOf(srcMapping.name);\n\t }\n\t\n\t destOriginalMappings.push(destMapping);\n\t }\n\t\n\t destGeneratedMappings.push(destMapping);\n\t }\n\t\n\t quickSort(smc.__originalMappings, util.compareByOriginalPositions);\n\t\n\t return smc;\n\t };\n\t\n\t/**\n\t * The version of the source mapping spec that we are consuming.\n\t */\n\tBasicSourceMapConsumer.prototype._version = 3;\n\t\n\t/**\n\t * The list of original sources.\n\t */\n\tObject.defineProperty(BasicSourceMapConsumer.prototype, 'sources', {\n\t get: function () {\n\t return this._absoluteSources.slice();\n\t }\n\t});\n\t\n\t/**\n\t * Provide the JIT with a nice shape / hidden class.\n\t */\n\tfunction Mapping() {\n\t this.generatedLine = 0;\n\t this.generatedColumn = 0;\n\t this.source = null;\n\t this.originalLine = null;\n\t this.originalColumn = null;\n\t this.name = null;\n\t}\n\t\n\t/**\n\t * Parse the mappings in a string in to a data structure which we can easily\n\t * query (the ordered arrays in the `this.__generatedMappings` and\n\t * `this.__originalMappings` properties).\n\t */\n\tBasicSourceMapConsumer.prototype._parseMappings =\n\t function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {\n\t var generatedLine = 1;\n\t var previousGeneratedColumn = 0;\n\t var previousOriginalLine 
= 0;\n\t var previousOriginalColumn = 0;\n\t var previousSource = 0;\n\t var previousName = 0;\n\t var length = aStr.length;\n\t var index = 0;\n\t var cachedSegments = {};\n\t var temp = {};\n\t var originalMappings = [];\n\t var generatedMappings = [];\n\t var mapping, str, segment, end, value;\n\t\n\t while (index < length) {\n\t if (aStr.charAt(index) === ';') {\n\t generatedLine++;\n\t index++;\n\t previousGeneratedColumn = 0;\n\t }\n\t else if (aStr.charAt(index) === ',') {\n\t index++;\n\t }\n\t else {\n\t mapping = new Mapping();\n\t mapping.generatedLine = generatedLine;\n\t\n\t // Because each offset is encoded relative to the previous one,\n\t // many segments often have the same encoding. We can exploit this\n\t // fact by caching the parsed variable length fields of each segment,\n\t // allowing us to avoid a second parse if we encounter the same\n\t // segment again.\n\t for (end = index; end < length; end++) {\n\t if (this._charIsMappingSeparator(aStr, end)) {\n\t break;\n\t }\n\t }\n\t str = aStr.slice(index, end);\n\t\n\t segment = cachedSegments[str];\n\t if (segment) {\n\t index += str.length;\n\t } else {\n\t segment = [];\n\t while (index < end) {\n\t base64VLQ.decode(aStr, index, temp);\n\t value = temp.value;\n\t index = temp.rest;\n\t segment.push(value);\n\t }\n\t\n\t if (segment.length === 2) {\n\t throw new Error('Found a source, but no line and column');\n\t }\n\t\n\t if (segment.length === 3) {\n\t throw new Error('Found a source and line, but no column');\n\t }\n\t\n\t cachedSegments[str] = segment;\n\t }\n\t\n\t // Generated column.\n\t mapping.generatedColumn = previousGeneratedColumn + segment[0];\n\t previousGeneratedColumn = mapping.generatedColumn;\n\t\n\t if (segment.length > 1) {\n\t // Original source.\n\t mapping.source = previousSource + segment[1];\n\t previousSource += segment[1];\n\t\n\t // Original line.\n\t mapping.originalLine = previousOriginalLine + segment[2];\n\t previousOriginalLine = mapping.originalLine;\n\t // Lines are stored 0-based\n\t mapping.originalLine += 1;\n\t\n\t // Original column.\n\t mapping.originalColumn = previousOriginalColumn + segment[3];\n\t previousOriginalColumn = mapping.originalColumn;\n\t\n\t if (segment.length > 4) {\n\t // Original name.\n\t mapping.name = previousName + segment[4];\n\t previousName += segment[4];\n\t }\n\t }\n\t\n\t generatedMappings.push(mapping);\n\t if (typeof mapping.originalLine === 'number') {\n\t originalMappings.push(mapping);\n\t }\n\t }\n\t }\n\t\n\t quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated);\n\t this.__generatedMappings = generatedMappings;\n\t\n\t quickSort(originalMappings, util.compareByOriginalPositions);\n\t this.__originalMappings = originalMappings;\n\t };\n\t\n\t/**\n\t * Find the mapping that best matches the hypothetical \"needle\" mapping that\n\t * we are searching for in the given \"haystack\" of mappings.\n\t */\n\tBasicSourceMapConsumer.prototype._findMapping =\n\t function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName,\n\t aColumnName, aComparator, aBias) {\n\t // To return the position we are searching for, we must first find the\n\t // mapping for the given position and then return the opposite position it\n\t // points to. 
Because the mappings are sorted, we can use binary search to\n\t // find the best mapping.\n\t\n\t if (aNeedle[aLineName] <= 0) {\n\t throw new TypeError('Line must be greater than or equal to 1, got '\n\t + aNeedle[aLineName]);\n\t }\n\t if (aNeedle[aColumnName] < 0) {\n\t throw new TypeError('Column must be greater than or equal to 0, got '\n\t + aNeedle[aColumnName]);\n\t }\n\t\n\t return binarySearch.search(aNeedle, aMappings, aComparator, aBias);\n\t };\n\t\n\t/**\n\t * Compute the last column for each generated mapping. The last column is\n\t * inclusive.\n\t */\n\tBasicSourceMapConsumer.prototype.computeColumnSpans =\n\t function SourceMapConsumer_computeColumnSpans() {\n\t for (var index = 0; index < this._generatedMappings.length; ++index) {\n\t var mapping = this._generatedMappings[index];\n\t\n\t // Mappings do not contain a field for the last generated columnt. We\n\t // can come up with an optimistic estimate, however, by assuming that\n\t // mappings are contiguous (i.e. given two consecutive mappings, the\n\t // first mapping ends where the second one starts).\n\t if (index + 1 < this._generatedMappings.length) {\n\t var nextMapping = this._generatedMappings[index + 1];\n\t\n\t if (mapping.generatedLine === nextMapping.generatedLine) {\n\t mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1;\n\t continue;\n\t }\n\t }\n\t\n\t // The last mapping for each line spans the entire line.\n\t mapping.lastGeneratedColumn = Infinity;\n\t }\n\t };\n\t\n\t/**\n\t * Returns the original source, line, and column information for the generated\n\t * source's line and column positions provided. The only argument is an object\n\t * with the following properties:\n\t *\n\t * - line: The line number in the generated source. The line number\n\t * is 1-based.\n\t * - column: The column number in the generated source. The column\n\t * number is 0-based.\n\t * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or\n\t * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the\n\t * closest element that is smaller than or greater than the one we are\n\t * searching for, respectively, if the exact element cannot be found.\n\t * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'.\n\t *\n\t * and an object is returned with the following properties:\n\t *\n\t * - source: The original source file, or null.\n\t * - line: The line number in the original source, or null. The\n\t * line number is 1-based.\n\t * - column: The column number in the original source, or null. 
The\n\t * column number is 0-based.\n\t * - name: The original identifier, or null.\n\t */\n\tBasicSourceMapConsumer.prototype.originalPositionFor =\n\t function SourceMapConsumer_originalPositionFor(aArgs) {\n\t var needle = {\n\t generatedLine: util.getArg(aArgs, 'line'),\n\t generatedColumn: util.getArg(aArgs, 'column')\n\t };\n\t\n\t var index = this._findMapping(\n\t needle,\n\t this._generatedMappings,\n\t \"generatedLine\",\n\t \"generatedColumn\",\n\t util.compareByGeneratedPositionsDeflated,\n\t util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)\n\t );\n\t\n\t if (index >= 0) {\n\t var mapping = this._generatedMappings[index];\n\t\n\t if (mapping.generatedLine === needle.generatedLine) {\n\t var source = util.getArg(mapping, 'source', null);\n\t if (source !== null) {\n\t source = this._sources.at(source);\n\t source = util.computeSourceURL(this.sourceRoot, source, this._sourceMapURL);\n\t }\n\t var name = util.getArg(mapping, 'name', null);\n\t if (name !== null) {\n\t name = this._names.at(name);\n\t }\n\t return {\n\t source: source,\n\t line: util.getArg(mapping, 'originalLine', null),\n\t column: util.getArg(mapping, 'originalColumn', null),\n\t name: name\n\t };\n\t }\n\t }\n\t\n\t return {\n\t source: null,\n\t line: null,\n\t column: null,\n\t name: null\n\t };\n\t };\n\t\n\t/**\n\t * Return true if we have the source content for every source in the source\n\t * map, false otherwise.\n\t */\n\tBasicSourceMapConsumer.prototype.hasContentsOfAllSources =\n\t function BasicSourceMapConsumer_hasContentsOfAllSources() {\n\t if (!this.sourcesContent) {\n\t return false;\n\t }\n\t return this.sourcesContent.length >= this._sources.size() &&\n\t !this.sourcesContent.some(function (sc) { return sc == null; });\n\t };\n\t\n\t/**\n\t * Returns the original source content. The only argument is the url of the\n\t * original source file. Returns null if no original source content is\n\t * available.\n\t */\n\tBasicSourceMapConsumer.prototype.sourceContentFor =\n\t function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {\n\t if (!this.sourcesContent) {\n\t return null;\n\t }\n\t\n\t var index = this._findSourceIndex(aSource);\n\t if (index >= 0) {\n\t return this.sourcesContent[index];\n\t }\n\t\n\t var relativeSource = aSource;\n\t if (this.sourceRoot != null) {\n\t relativeSource = util.relative(this.sourceRoot, relativeSource);\n\t }\n\t\n\t var url;\n\t if (this.sourceRoot != null\n\t && (url = util.urlParse(this.sourceRoot))) {\n\t // XXX: file:// URIs and absolute paths lead to unexpected behavior for\n\t // many users. We can help them out when they expect file:// URIs to\n\t // behave like it would if they were running a local HTTP server. See\n\t // https://bugzilla.mozilla.org/show_bug.cgi?id=885597.\n\t var fileUriAbsPath = relativeSource.replace(/^file:\\/\\//, \"\");\n\t if (url.scheme == \"file\"\n\t && this._sources.has(fileUriAbsPath)) {\n\t return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)]\n\t }\n\t\n\t if ((!url.path || url.path == \"/\")\n\t && this._sources.has(\"/\" + relativeSource)) {\n\t return this.sourcesContent[this._sources.indexOf(\"/\" + relativeSource)];\n\t }\n\t }\n\t\n\t // This function is used recursively from\n\t // IndexedSourceMapConsumer.prototype.sourceContentFor. 
In that case, we\n\t // don't want to throw if we can't find the source - we just want to\n\t // return null, so we provide a flag to exit gracefully.\n\t if (nullOnMissing) {\n\t return null;\n\t }\n\t else {\n\t throw new Error('\"' + relativeSource + '\" is not in the SourceMap.');\n\t }\n\t };\n\t\n\t/**\n\t * Returns the generated line and column information for the original source,\n\t * line, and column positions provided. The only argument is an object with\n\t * the following properties:\n\t *\n\t * - source: The filename of the original source.\n\t * - line: The line number in the original source. The line number\n\t * is 1-based.\n\t * - column: The column number in the original source. The column\n\t * number is 0-based.\n\t * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or\n\t * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the\n\t * closest element that is smaller than or greater than the one we are\n\t * searching for, respectively, if the exact element cannot be found.\n\t * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'.\n\t *\n\t * and an object is returned with the following properties:\n\t *\n\t * - line: The line number in the generated source, or null. The\n\t * line number is 1-based.\n\t * - column: The column number in the generated source, or null.\n\t * The column number is 0-based.\n\t */\n\tBasicSourceMapConsumer.prototype.generatedPositionFor =\n\t function SourceMapConsumer_generatedPositionFor(aArgs) {\n\t var source = util.getArg(aArgs, 'source');\n\t source = this._findSourceIndex(source);\n\t if (source < 0) {\n\t return {\n\t line: null,\n\t column: null,\n\t lastColumn: null\n\t };\n\t }\n\t\n\t var needle = {\n\t source: source,\n\t originalLine: util.getArg(aArgs, 'line'),\n\t originalColumn: util.getArg(aArgs, 'column')\n\t };\n\t\n\t var index = this._findMapping(\n\t needle,\n\t this._originalMappings,\n\t \"originalLine\",\n\t \"originalColumn\",\n\t util.compareByOriginalPositions,\n\t util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)\n\t );\n\t\n\t if (index >= 0) {\n\t var mapping = this._originalMappings[index];\n\t\n\t if (mapping.source === needle.source) {\n\t return {\n\t line: util.getArg(mapping, 'generatedLine', null),\n\t column: util.getArg(mapping, 'generatedColumn', null),\n\t lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)\n\t };\n\t }\n\t }\n\t\n\t return {\n\t line: null,\n\t column: null,\n\t lastColumn: null\n\t };\n\t };\n\t\n\texports.BasicSourceMapConsumer = BasicSourceMapConsumer;\n\t\n\t/**\n\t * An IndexedSourceMapConsumer instance represents a parsed source map which\n\t * we can query for information. It differs from BasicSourceMapConsumer in\n\t * that it takes \"indexed\" source maps (i.e. ones with a \"sections\" field) as\n\t * input.\n\t *\n\t * The first parameter is a raw source map (either as a JSON string, or already\n\t * parsed to an object). According to the spec for indexed source maps, they\n\t * have the following attributes:\n\t *\n\t * - version: Which version of the source map spec this map is following.\n\t * - file: Optional. The generated file this source map is associated with.\n\t * - sections: A list of section definitions.\n\t *\n\t * Each value under the \"sections\" field has two fields:\n\t * - offset: The offset into the original specified at which this section\n\t * begins to apply, defined as an object with a \"line\" and \"column\"\n\t * field.\n\t * - map: A source map definition. 
This source map could also be indexed,\n\t * but doesn't have to be.\n\t *\n\t * Instead of the \"map\" field, it's also possible to have a \"url\" field\n\t * specifying a URL to retrieve a source map from, but that's currently\n\t * unsupported.\n\t *\n\t * Here's an example source map, taken from the source map spec[0], but\n\t * modified to omit a section which uses the \"url\" field.\n\t *\n\t * {\n\t * version : 3,\n\t * file: \"app.js\",\n\t * sections: [{\n\t * offset: {line:100, column:10},\n\t * map: {\n\t * version : 3,\n\t * file: \"section.js\",\n\t * sources: [\"foo.js\", \"bar.js\"],\n\t * names: [\"src\", \"maps\", \"are\", \"fun\"],\n\t * mappings: \"AAAA,E;;ABCDE;\"\n\t * }\n\t * }],\n\t * }\n\t *\n\t * The second parameter, if given, is a string whose value is the URL\n\t * at which the source map was found. This URL is used to compute the\n\t * sources array.\n\t *\n\t * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt\n\t */\n\tfunction IndexedSourceMapConsumer(aSourceMap, aSourceMapURL) {\n\t var sourceMap = aSourceMap;\n\t if (typeof aSourceMap === 'string') {\n\t sourceMap = util.parseSourceMapInput(aSourceMap);\n\t }\n\t\n\t var version = util.getArg(sourceMap, 'version');\n\t var sections = util.getArg(sourceMap, 'sections');\n\t\n\t if (version != this._version) {\n\t throw new Error('Unsupported version: ' + version);\n\t }\n\t\n\t this._sources = new ArraySet();\n\t this._names = new ArraySet();\n\t\n\t var lastOffset = {\n\t line: -1,\n\t column: 0\n\t };\n\t this._sections = sections.map(function (s) {\n\t if (s.url) {\n\t // The url field will require support for asynchronicity.\n\t // See https://github.com/mozilla/source-map/issues/16\n\t throw new Error('Support for url field in sections not implemented.');\n\t }\n\t var offset = util.getArg(s, 'offset');\n\t var offsetLine = util.getArg(offset, 'line');\n\t var offsetColumn = util.getArg(offset, 'column');\n\t\n\t if (offsetLine < lastOffset.line ||\n\t (offsetLine === lastOffset.line && offsetColumn < lastOffset.column)) {\n\t throw new Error('Section offsets must be ordered and non-overlapping.');\n\t }\n\t lastOffset = offset;\n\t\n\t return {\n\t generatedOffset: {\n\t // The offset fields are 0-based, but we use 1-based indices when\n\t // encoding/decoding from VLQ.\n\t generatedLine: offsetLine + 1,\n\t generatedColumn: offsetColumn + 1\n\t },\n\t consumer: new SourceMapConsumer(util.getArg(s, 'map'), aSourceMapURL)\n\t }\n\t });\n\t}\n\t\n\tIndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);\n\tIndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer;\n\t\n\t/**\n\t * The version of the source mapping spec that we are consuming.\n\t */\n\tIndexedSourceMapConsumer.prototype._version = 3;\n\t\n\t/**\n\t * The list of original sources.\n\t */\n\tObject.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', {\n\t get: function () {\n\t var sources = [];\n\t for (var i = 0; i < this._sections.length; i++) {\n\t for (var j = 0; j < this._sections[i].consumer.sources.length; j++) {\n\t sources.push(this._sections[i].consumer.sources[j]);\n\t }\n\t }\n\t return sources;\n\t }\n\t});\n\t\n\t/**\n\t * Returns the original source, line, and column information for the generated\n\t * source's line and column positions provided. The only argument is an object\n\t * with the following properties:\n\t *\n\t * - line: The line number in the generated source. 
The line number\n\t * is 1-based.\n\t * - column: The column number in the generated source. The column\n\t * number is 0-based.\n\t *\n\t * and an object is returned with the following properties:\n\t *\n\t * - source: The original source file, or null.\n\t * - line: The line number in the original source, or null. The\n\t * line number is 1-based.\n\t * - column: The column number in the original source, or null. The\n\t * column number is 0-based.\n\t * - name: The original identifier, or null.\n\t */\n\tIndexedSourceMapConsumer.prototype.originalPositionFor =\n\t function IndexedSourceMapConsumer_originalPositionFor(aArgs) {\n\t var needle = {\n\t generatedLine: util.getArg(aArgs, 'line'),\n\t generatedColumn: util.getArg(aArgs, 'column')\n\t };\n\t\n\t // Find the section containing the generated position we're trying to map\n\t // to an original position.\n\t var sectionIndex = binarySearch.search(needle, this._sections,\n\t function(needle, section) {\n\t var cmp = needle.generatedLine - section.generatedOffset.generatedLine;\n\t if (cmp) {\n\t return cmp;\n\t }\n\t\n\t return (needle.generatedColumn -\n\t section.generatedOffset.generatedColumn);\n\t });\n\t var section = this._sections[sectionIndex];\n\t\n\t if (!section) {\n\t return {\n\t source: null,\n\t line: null,\n\t column: null,\n\t name: null\n\t };\n\t }\n\t\n\t return section.consumer.originalPositionFor({\n\t line: needle.generatedLine -\n\t (section.generatedOffset.generatedLine - 1),\n\t column: needle.generatedColumn -\n\t (section.generatedOffset.generatedLine === needle.generatedLine\n\t ? section.generatedOffset.generatedColumn - 1\n\t : 0),\n\t bias: aArgs.bias\n\t });\n\t };\n\t\n\t/**\n\t * Return true if we have the source content for every source in the source\n\t * map, false otherwise.\n\t */\n\tIndexedSourceMapConsumer.prototype.hasContentsOfAllSources =\n\t function IndexedSourceMapConsumer_hasContentsOfAllSources() {\n\t return this._sections.every(function (s) {\n\t return s.consumer.hasContentsOfAllSources();\n\t });\n\t };\n\t\n\t/**\n\t * Returns the original source content. The only argument is the url of the\n\t * original source file. Returns null if no original source content is\n\t * available.\n\t */\n\tIndexedSourceMapConsumer.prototype.sourceContentFor =\n\t function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {\n\t for (var i = 0; i < this._sections.length; i++) {\n\t var section = this._sections[i];\n\t\n\t var content = section.consumer.sourceContentFor(aSource, true);\n\t if (content) {\n\t return content;\n\t }\n\t }\n\t if (nullOnMissing) {\n\t return null;\n\t }\n\t else {\n\t throw new Error('\"' + aSource + '\" is not in the SourceMap.');\n\t }\n\t };\n\t\n\t/**\n\t * Returns the generated line and column information for the original source,\n\t * line, and column positions provided. The only argument is an object with\n\t * the following properties:\n\t *\n\t * - source: The filename of the original source.\n\t * - line: The line number in the original source. The line number\n\t * is 1-based.\n\t * - column: The column number in the original source. The column\n\t * number is 0-based.\n\t *\n\t * and an object is returned with the following properties:\n\t *\n\t * - line: The line number in the generated source, or null. The\n\t * line number is 1-based. 
\n\t * - column: The column number in the generated source, or null.\n\t * The column number is 0-based.\n\t */\n\tIndexedSourceMapConsumer.prototype.generatedPositionFor =\n\t function IndexedSourceMapConsumer_generatedPositionFor(aArgs) {\n\t for (var i = 0; i < this._sections.length; i++) {\n\t var section = this._sections[i];\n\t\n\t // Only consider this section if the requested source is in the list of\n\t // sources of the consumer.\n\t if (section.consumer._findSourceIndex(util.getArg(aArgs, 'source')) === -1) {\n\t continue;\n\t }\n\t var generatedPosition = section.consumer.generatedPositionFor(aArgs);\n\t if (generatedPosition) {\n\t var ret = {\n\t line: generatedPosition.line +\n\t (section.generatedOffset.generatedLine - 1),\n\t column: generatedPosition.column +\n\t (section.generatedOffset.generatedLine === generatedPosition.line\n\t ? section.generatedOffset.generatedColumn - 1\n\t : 0)\n\t };\n\t return ret;\n\t }\n\t }\n\t\n\t return {\n\t line: null,\n\t column: null\n\t };\n\t };\n\t\n\t/**\n\t * Parse the mappings in a string in to a data structure which we can easily\n\t * query (the ordered arrays in the `this.__generatedMappings` and\n\t * `this.__originalMappings` properties).\n\t */\n\tIndexedSourceMapConsumer.prototype._parseMappings =\n\t function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) {\n\t this.__generatedMappings = [];\n\t this.__originalMappings = [];\n\t for (var i = 0; i < this._sections.length; i++) {\n\t var section = this._sections[i];\n\t var sectionMappings = section.consumer._generatedMappings;\n\t for (var j = 0; j < sectionMappings.length; j++) {\n\t var mapping = sectionMappings[j];\n\t\n\t var source = section.consumer._sources.at(mapping.source);\n\t source = util.computeSourceURL(section.consumer.sourceRoot, source, this._sourceMapURL);\n\t this._sources.add(source);\n\t source = this._sources.indexOf(source);\n\t\n\t var name = null;\n\t if (mapping.name) {\n\t name = section.consumer._names.at(mapping.name);\n\t this._names.add(name);\n\t name = this._names.indexOf(name);\n\t }\n\t\n\t // The mappings coming from the consumer for the section have\n\t // generated positions relative to the start of the section, so we\n\t // need to offset them to be relative to the start of the concatenated\n\t // generated file.\n\t var adjustedMapping = {\n\t source: source,\n\t generatedLine: mapping.generatedLine +\n\t (section.generatedOffset.generatedLine - 1),\n\t generatedColumn: mapping.generatedColumn +\n\t (section.generatedOffset.generatedLine === mapping.generatedLine\n\t ? section.generatedOffset.generatedColumn - 1\n\t : 0),\n\t originalLine: mapping.originalLine,\n\t originalColumn: mapping.originalColumn,\n\t name: name\n\t };\n\t\n\t this.__generatedMappings.push(adjustedMapping);\n\t if (typeof adjustedMapping.originalLine === 'number') {\n\t this.__originalMappings.push(adjustedMapping);\n\t }\n\t }\n\t }\n\t\n\t quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated);\n\t quickSort(this.__originalMappings, util.compareByOriginalPositions);\n\t };\n\t\n\texports.IndexedSourceMapConsumer = IndexedSourceMapConsumer;\n\n\n/***/ }),\n/* 8 */\n/***/ (function(module, exports) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. 
See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\texports.GREATEST_LOWER_BOUND = 1;\n\texports.LEAST_UPPER_BOUND = 2;\n\t\n\t/**\n\t * Recursive implementation of binary search.\n\t *\n\t * @param aLow Indices here and lower do not contain the needle.\n\t * @param aHigh Indices here and higher do not contain the needle.\n\t * @param aNeedle The element being searched for.\n\t * @param aHaystack The non-empty array being searched.\n\t * @param aCompare Function which takes two elements and returns -1, 0, or 1.\n\t * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or\n\t * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the\n\t * closest element that is smaller than or greater than the one we are\n\t * searching for, respectively, if the exact element cannot be found.\n\t */\n\tfunction recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) {\n\t // This function terminates when one of the following is true:\n\t //\n\t // 1. We find the exact element we are looking for.\n\t //\n\t // 2. We did not find the exact element, but we can return the index of\n\t // the next-closest element.\n\t //\n\t // 3. We did not find the exact element, and there is no next-closest\n\t // element than the one we are searching for, so we return -1.\n\t var mid = Math.floor((aHigh - aLow) / 2) + aLow;\n\t var cmp = aCompare(aNeedle, aHaystack[mid], true);\n\t if (cmp === 0) {\n\t // Found the element we are looking for.\n\t return mid;\n\t }\n\t else if (cmp > 0) {\n\t // Our needle is greater than aHaystack[mid].\n\t if (aHigh - mid > 1) {\n\t // The element is in the upper half.\n\t return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias);\n\t }\n\t\n\t // The exact needle element was not found in this haystack. Determine if\n\t // we are in termination case (3) or (2) and return the appropriate thing.\n\t if (aBias == exports.LEAST_UPPER_BOUND) {\n\t return aHigh < aHaystack.length ? aHigh : -1;\n\t } else {\n\t return mid;\n\t }\n\t }\n\t else {\n\t // Our needle is less than aHaystack[mid].\n\t if (mid - aLow > 1) {\n\t // The element is in the lower half.\n\t return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias);\n\t }\n\t\n\t // we are in termination case (3) or (2) and return the appropriate thing.\n\t if (aBias == exports.LEAST_UPPER_BOUND) {\n\t return mid;\n\t } else {\n\t return aLow < 0 ? -1 : aLow;\n\t }\n\t }\n\t}\n\t\n\t/**\n\t * This is an implementation of binary search which will always try and return\n\t * the index of the closest element if there is no exact hit. This is because\n\t * mappings between original and generated line/col pairs are single points,\n\t * and there is an implicit region between each of them, so a miss just means\n\t * that you aren't on the very start of a region.\n\t *\n\t * @param aNeedle The element you are looking for.\n\t * @param aHaystack The array that is being searched.\n\t * @param aCompare A function which takes the needle and an element in the\n\t * array and returns -1, 0, or 1 depending on whether the needle is less\n\t * than, equal to, or greater than the element, respectively.\n\t * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or\n\t * 'binarySearch.LEAST_UPPER_BOUND'. 
Specifies whether to return the\n\t * closest element that is smaller than or greater than the one we are\n\t * searching for, respectively, if the exact element cannot be found.\n\t * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'.\n\t */\n\texports.search = function search(aNeedle, aHaystack, aCompare, aBias) {\n\t if (aHaystack.length === 0) {\n\t return -1;\n\t }\n\t\n\t var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack,\n\t aCompare, aBias || exports.GREATEST_LOWER_BOUND);\n\t if (index < 0) {\n\t return -1;\n\t }\n\t\n\t // We have found either the exact element, or the next-closest element than\n\t // the one we are searching for. However, there may be more than one such\n\t // element. Make sure we always return the smallest of these.\n\t while (index - 1 >= 0) {\n\t if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) {\n\t break;\n\t }\n\t --index;\n\t }\n\t\n\t return index;\n\t};\n\n\n/***/ }),\n/* 9 */\n/***/ (function(module, exports) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\t// It turns out that some (most?) JavaScript engines don't self-host\n\t// `Array.prototype.sort`. This makes sense because C++ will likely remain\n\t// faster than JS when doing raw CPU-intensive sorting. However, when using a\n\t// custom comparator function, calling back and forth between the VM's C++ and\n\t// JIT'd JS is rather slow *and* loses JIT type information, resulting in\n\t// worse generated code for the comparator function than would be optimal. In\n\t// fact, when sorting with a comparator, these costs outweigh the benefits of\n\t// sorting in C++. By using our own JS-implemented Quick Sort (below), we get\n\t// a ~3500ms mean speed-up in `bench/bench.html`.\n\t\n\t/**\n\t * Swap the elements indexed by `x` and `y` in the array `ary`.\n\t *\n\t * @param {Array} ary\n\t * The array.\n\t * @param {Number} x\n\t * The index of the first item.\n\t * @param {Number} y\n\t * The index of the second item.\n\t */\n\tfunction swap(ary, x, y) {\n\t var temp = ary[x];\n\t ary[x] = ary[y];\n\t ary[y] = temp;\n\t}\n\t\n\t/**\n\t * Returns a random integer within the range `low .. high` inclusive.\n\t *\n\t * @param {Number} low\n\t * The lower bound on the range.\n\t * @param {Number} high\n\t * The upper bound on the range.\n\t */\n\tfunction randomIntInRange(low, high) {\n\t return Math.round(low + (Math.random() * (high - low)));\n\t}\n\t\n\t/**\n\t * The Quick Sort algorithm.\n\t *\n\t * @param {Array} ary\n\t * An array to sort.\n\t * @param {function} comparator\n\t * Function to use to compare two items.\n\t * @param {Number} p\n\t * Start index of the array\n\t * @param {Number} r\n\t * End index of the array\n\t */\n\tfunction doQuickSort(ary, comparator, p, r) {\n\t // If our lower bound is less than our upper bound, we (1) partition the\n\t // array into two pieces and (2) recurse on each half. If it is not, this is\n\t // the empty array and our base case.\n\t\n\t if (p < r) {\n\t // (1) Partitioning.\n\t //\n\t // The partitioning chooses a pivot between `p` and `r` and moves all\n\t // elements that are less than or equal to the pivot to the before it, and\n\t // all the elements that are greater than it after it. 
The effect is that\n\t // once partition is done, the pivot is in the exact place it will be when\n\t // the array is put in sorted order, and it will not need to be moved\n\t // again. This runs in O(n) time.\n\t\n\t // Always choose a random pivot so that an input array which is reverse\n\t // sorted does not cause O(n^2) running time.\n\t var pivotIndex = randomIntInRange(p, r);\n\t var i = p - 1;\n\t\n\t swap(ary, pivotIndex, r);\n\t var pivot = ary[r];\n\t\n\t // Immediately after `j` is incremented in this loop, the following hold\n\t // true:\n\t //\n\t // * Every element in `ary[p .. i]` is less than or equal to the pivot.\n\t //\n\t // * Every element in `ary[i+1 .. j-1]` is greater than the pivot.\n\t for (var j = p; j < r; j++) {\n\t if (comparator(ary[j], pivot) <= 0) {\n\t i += 1;\n\t swap(ary, i, j);\n\t }\n\t }\n\t\n\t swap(ary, i + 1, j);\n\t var q = i + 1;\n\t\n\t // (2) Recurse on each half.\n\t\n\t doQuickSort(ary, comparator, p, q - 1);\n\t doQuickSort(ary, comparator, q + 1, r);\n\t }\n\t}\n\t\n\t/**\n\t * Sort the given array in-place with the given comparator function.\n\t *\n\t * @param {Array} ary\n\t * An array to sort.\n\t * @param {function} comparator\n\t * Function to use to compare two items.\n\t */\n\texports.quickSort = function (ary, comparator) {\n\t doQuickSort(ary, comparator, 0, ary.length - 1);\n\t};\n\n\n/***/ }),\n/* 10 */\n/***/ (function(module, exports, __webpack_require__) {\n\n\t/* -*- Mode: js; js-indent-level: 2; -*- */\n\t/*\n\t * Copyright 2011 Mozilla Foundation and contributors\n\t * Licensed under the New BSD license. See LICENSE or:\n\t * http://opensource.org/licenses/BSD-3-Clause\n\t */\n\t\n\tvar SourceMapGenerator = __webpack_require__(1).SourceMapGenerator;\n\tvar util = __webpack_require__(4);\n\t\n\t// Matches a Windows-style `\\r\\n` newline or a `\\n` newline used by all other\n\t// operating systems these days (capturing the result).\n\tvar REGEX_NEWLINE = /(\\r?\\n)/;\n\t\n\t// Newline character code for charCodeAt() comparisons\n\tvar NEWLINE_CODE = 10;\n\t\n\t// Private symbol for identifying `SourceNode`s when multiple versions of\n\t// the source-map library are loaded. This MUST NOT CHANGE across\n\t// versions!\n\tvar isSourceNode = \"$$$isSourceNode$$$\";\n\t\n\t/**\n\t * SourceNodes provide a way to abstract over interpolating/concatenating\n\t * snippets of generated JavaScript source code while maintaining the line and\n\t * column information associated with the original source code.\n\t *\n\t * @param aLine The original line number.\n\t * @param aColumn The original column number.\n\t * @param aSource The original source's filename.\n\t * @param aChunks Optional. An array of strings which are snippets of\n\t * generated JS, or other SourceNodes.\n\t * @param aName The original identifier.\n\t */\n\tfunction SourceNode(aLine, aColumn, aSource, aChunks, aName) {\n\t this.children = [];\n\t this.sourceContents = {};\n\t this.line = aLine == null ? null : aLine;\n\t this.column = aColumn == null ? null : aColumn;\n\t this.source = aSource == null ? null : aSource;\n\t this.name = aName == null ? null : aName;\n\t this[isSourceNode] = true;\n\t if (aChunks != null) this.add(aChunks);\n\t}\n\t\n\t/**\n\t * Creates a SourceNode from generated code and a SourceMapConsumer.\n\t *\n\t * @param aGeneratedCode The generated code\n\t * @param aSourceMapConsumer The SourceMap for the generated code\n\t * @param aRelativePath Optional. 
The path that relative sources in the\n\t * SourceMapConsumer should be relative to.\n\t */\n\tSourceNode.fromStringWithSourceMap =\n\t function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) {\n\t // The SourceNode we want to fill with the generated code\n\t // and the SourceMap\n\t var node = new SourceNode();\n\t\n\t // All even indices of this array are one line of the generated code,\n\t // while all odd indices are the newlines between two adjacent lines\n\t // (since `REGEX_NEWLINE` captures its match).\n\t // Processed fragments are accessed by calling `shiftNextLine`.\n\t var remainingLines = aGeneratedCode.split(REGEX_NEWLINE);\n\t var remainingLinesIndex = 0;\n\t var shiftNextLine = function() {\n\t var lineContents = getNextLine();\n\t // The last line of a file might not have a newline.\n\t var newLine = getNextLine() || \"\";\n\t return lineContents + newLine;\n\t\n\t function getNextLine() {\n\t return remainingLinesIndex < remainingLines.length ?\n\t remainingLines[remainingLinesIndex++] : undefined;\n\t }\n\t };\n\t\n\t // We need to remember the position of \"remainingLines\"\n\t var lastGeneratedLine = 1, lastGeneratedColumn = 0;\n\t\n\t // The generate SourceNodes we need a code range.\n\t // To extract it current and last mapping is used.\n\t // Here we store the last mapping.\n\t var lastMapping = null;\n\t\n\t aSourceMapConsumer.eachMapping(function (mapping) {\n\t if (lastMapping !== null) {\n\t // We add the code from \"lastMapping\" to \"mapping\":\n\t // First check if there is a new line in between.\n\t if (lastGeneratedLine < mapping.generatedLine) {\n\t // Associate first line with \"lastMapping\"\n\t addMappingWithCode(lastMapping, shiftNextLine());\n\t lastGeneratedLine++;\n\t lastGeneratedColumn = 0;\n\t // The remaining code is added without mapping\n\t } else {\n\t // There is no new line in between.\n\t // Associate the code between \"lastGeneratedColumn\" and\n\t // \"mapping.generatedColumn\" with \"lastMapping\"\n\t var nextLine = remainingLines[remainingLinesIndex] || '';\n\t var code = nextLine.substr(0, mapping.generatedColumn -\n\t lastGeneratedColumn);\n\t remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn -\n\t lastGeneratedColumn);\n\t lastGeneratedColumn = mapping.generatedColumn;\n\t addMappingWithCode(lastMapping, code);\n\t // No more remaining code, continue\n\t lastMapping = mapping;\n\t return;\n\t }\n\t }\n\t // We add the generated code until the first mapping\n\t // to the SourceNode without any mapping.\n\t // Each line is added as separate string.\n\t while (lastGeneratedLine < mapping.generatedLine) {\n\t node.add(shiftNextLine());\n\t lastGeneratedLine++;\n\t }\n\t if (lastGeneratedColumn < mapping.generatedColumn) {\n\t var nextLine = remainingLines[remainingLinesIndex] || '';\n\t node.add(nextLine.substr(0, mapping.generatedColumn));\n\t remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn);\n\t lastGeneratedColumn = mapping.generatedColumn;\n\t }\n\t lastMapping = mapping;\n\t }, this);\n\t // We have processed all mappings.\n\t if (remainingLinesIndex < remainingLines.length) {\n\t if (lastMapping) {\n\t // Associate the remaining code in the current line with \"lastMapping\"\n\t addMappingWithCode(lastMapping, shiftNextLine());\n\t }\n\t // and add the remaining lines without any mapping\n\t node.add(remainingLines.splice(remainingLinesIndex).join(\"\"));\n\t }\n\t\n\t // Copy sourcesContent into SourceNode\n\t 
aSourceMapConsumer.sources.forEach(function (sourceFile) {\n\t var content = aSourceMapConsumer.sourceContentFor(sourceFile);\n\t if (content != null) {\n\t if (aRelativePath != null) {\n\t sourceFile = util.join(aRelativePath, sourceFile);\n\t }\n\t node.setSourceContent(sourceFile, content);\n\t }\n\t });\n\t\n\t return node;\n\t\n\t function addMappingWithCode(mapping, code) {\n\t if (mapping === null || mapping.source === undefined) {\n\t node.add(code);\n\t } else {\n\t var source = aRelativePath\n\t ? util.join(aRelativePath, mapping.source)\n\t : mapping.source;\n\t node.add(new SourceNode(mapping.originalLine,\n\t mapping.originalColumn,\n\t source,\n\t code,\n\t mapping.name));\n\t }\n\t }\n\t };\n\t\n\t/**\n\t * Add a chunk of generated JS to this source node.\n\t *\n\t * @param aChunk A string snippet of generated JS code, another instance of\n\t * SourceNode, or an array where each member is one of those things.\n\t */\n\tSourceNode.prototype.add = function SourceNode_add(aChunk) {\n\t if (Array.isArray(aChunk)) {\n\t aChunk.forEach(function (chunk) {\n\t this.add(chunk);\n\t }, this);\n\t }\n\t else if (aChunk[isSourceNode] || typeof aChunk === \"string\") {\n\t if (aChunk) {\n\t this.children.push(aChunk);\n\t }\n\t }\n\t else {\n\t throw new TypeError(\n\t \"Expected a SourceNode, string, or an array of SourceNodes and strings. Got \" + aChunk\n\t );\n\t }\n\t return this;\n\t};\n\t\n\t/**\n\t * Add a chunk of generated JS to the beginning of this source node.\n\t *\n\t * @param aChunk A string snippet of generated JS code, another instance of\n\t * SourceNode, or an array where each member is one of those things.\n\t */\n\tSourceNode.prototype.prepend = function SourceNode_prepend(aChunk) {\n\t if (Array.isArray(aChunk)) {\n\t for (var i = aChunk.length-1; i >= 0; i--) {\n\t this.prepend(aChunk[i]);\n\t }\n\t }\n\t else if (aChunk[isSourceNode] || typeof aChunk === \"string\") {\n\t this.children.unshift(aChunk);\n\t }\n\t else {\n\t throw new TypeError(\n\t \"Expected a SourceNode, string, or an array of SourceNodes and strings. Got \" + aChunk\n\t );\n\t }\n\t return this;\n\t};\n\t\n\t/**\n\t * Walk over the tree of JS snippets in this node and its children. The\n\t * walking function is called once for each snippet of JS and is passed that\n\t * snippet and the its original associated source's line/column location.\n\t *\n\t * @param aFn The traversal function.\n\t */\n\tSourceNode.prototype.walk = function SourceNode_walk(aFn) {\n\t var chunk;\n\t for (var i = 0, len = this.children.length; i < len; i++) {\n\t chunk = this.children[i];\n\t if (chunk[isSourceNode]) {\n\t chunk.walk(aFn);\n\t }\n\t else {\n\t if (chunk !== '') {\n\t aFn(chunk, { source: this.source,\n\t line: this.line,\n\t column: this.column,\n\t name: this.name });\n\t }\n\t }\n\t }\n\t};\n\t\n\t/**\n\t * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between\n\t * each of `this.children`.\n\t *\n\t * @param aSep The separator.\n\t */\n\tSourceNode.prototype.join = function SourceNode_join(aSep) {\n\t var newChildren;\n\t var i;\n\t var len = this.children.length;\n\t if (len > 0) {\n\t newChildren = [];\n\t for (i = 0; i < len-1; i++) {\n\t newChildren.push(this.children[i]);\n\t newChildren.push(aSep);\n\t }\n\t newChildren.push(this.children[i]);\n\t this.children = newChildren;\n\t }\n\t return this;\n\t};\n\t\n\t/**\n\t * Call String.prototype.replace on the very right-most source snippet. 
Useful\n\t * for trimming whitespace from the end of a source node, etc.\n\t *\n\t * @param aPattern The pattern to replace.\n\t * @param aReplacement The thing to replace the pattern with.\n\t */\n\tSourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) {\n\t var lastChild = this.children[this.children.length - 1];\n\t if (lastChild[isSourceNode]) {\n\t lastChild.replaceRight(aPattern, aReplacement);\n\t }\n\t else if (typeof lastChild === 'string') {\n\t this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement);\n\t }\n\t else {\n\t this.children.push(''.replace(aPattern, aReplacement));\n\t }\n\t return this;\n\t};\n\t\n\t/**\n\t * Set the source content for a source file. This will be added to the SourceMapGenerator\n\t * in the sourcesContent field.\n\t *\n\t * @param aSourceFile The filename of the source file\n\t * @param aSourceContent The content of the source file\n\t */\n\tSourceNode.prototype.setSourceContent =\n\t function SourceNode_setSourceContent(aSourceFile, aSourceContent) {\n\t this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent;\n\t };\n\t\n\t/**\n\t * Walk over the tree of SourceNodes. The walking function is called for each\n\t * source file content and is passed the filename and source content.\n\t *\n\t * @param aFn The traversal function.\n\t */\n\tSourceNode.prototype.walkSourceContents =\n\t function SourceNode_walkSourceContents(aFn) {\n\t for (var i = 0, len = this.children.length; i < len; i++) {\n\t if (this.children[i][isSourceNode]) {\n\t this.children[i].walkSourceContents(aFn);\n\t }\n\t }\n\t\n\t var sources = Object.keys(this.sourceContents);\n\t for (var i = 0, len = sources.length; i < len; i++) {\n\t aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]);\n\t }\n\t };\n\t\n\t/**\n\t * Return the string representation of this source node. 
Walks over the tree\n\t * and concatenates all the various snippets together to one string.\n\t */\n\tSourceNode.prototype.toString = function SourceNode_toString() {\n\t var str = \"\";\n\t this.walk(function (chunk) {\n\t str += chunk;\n\t });\n\t return str;\n\t};\n\t\n\t/**\n\t * Returns the string representation of this source node along with a source\n\t * map.\n\t */\n\tSourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) {\n\t var generated = {\n\t code: \"\",\n\t line: 1,\n\t column: 0\n\t };\n\t var map = new SourceMapGenerator(aArgs);\n\t var sourceMappingActive = false;\n\t var lastOriginalSource = null;\n\t var lastOriginalLine = null;\n\t var lastOriginalColumn = null;\n\t var lastOriginalName = null;\n\t this.walk(function (chunk, original) {\n\t generated.code += chunk;\n\t if (original.source !== null\n\t && original.line !== null\n\t && original.column !== null) {\n\t if(lastOriginalSource !== original.source\n\t || lastOriginalLine !== original.line\n\t || lastOriginalColumn !== original.column\n\t || lastOriginalName !== original.name) {\n\t map.addMapping({\n\t source: original.source,\n\t original: {\n\t line: original.line,\n\t column: original.column\n\t },\n\t generated: {\n\t line: generated.line,\n\t column: generated.column\n\t },\n\t name: original.name\n\t });\n\t }\n\t lastOriginalSource = original.source;\n\t lastOriginalLine = original.line;\n\t lastOriginalColumn = original.column;\n\t lastOriginalName = original.name;\n\t sourceMappingActive = true;\n\t } else if (sourceMappingActive) {\n\t map.addMapping({\n\t generated: {\n\t line: generated.line,\n\t column: generated.column\n\t }\n\t });\n\t lastOriginalSource = null;\n\t sourceMappingActive = false;\n\t }\n\t for (var idx = 0, length = chunk.length; idx < length; idx++) {\n\t if (chunk.charCodeAt(idx) === NEWLINE_CODE) {\n\t generated.line++;\n\t generated.column = 0;\n\t // Mappings end at eol\n\t if (idx + 1 === length) {\n\t lastOriginalSource = null;\n\t sourceMappingActive = false;\n\t } else if (sourceMappingActive) {\n\t map.addMapping({\n\t source: original.source,\n\t original: {\n\t line: original.line,\n\t column: original.column\n\t },\n\t generated: {\n\t line: generated.line,\n\t column: generated.column\n\t },\n\t name: original.name\n\t });\n\t }\n\t } else {\n\t generated.column++;\n\t }\n\t }\n\t });\n\t this.walkSourceContents(function (sourceFile, sourceContent) {\n\t map.setSourceContent(sourceFile, sourceContent);\n\t });\n\t\n\t return { code: generated.code, map: map };\n\t};\n\t\n\texports.SourceNode = SourceNode;\n\n\n/***/ })\n/******/ ])\n});\n;\n\n\n// WEBPACK FOOTER //\n// source-map.min.js"," \t// The module cache\n \tvar installedModules = {};\n\n \t// The require function\n \tfunction __webpack_require__(moduleId) {\n\n \t\t// Check if module is in cache\n \t\tif(installedModules[moduleId])\n \t\t\treturn installedModules[moduleId].exports;\n\n \t\t// Create a new module (and put it into the cache)\n \t\tvar module = installedModules[moduleId] = {\n \t\t\texports: {},\n \t\t\tid: moduleId,\n \t\t\tloaded: false\n \t\t};\n\n \t\t// Execute the module function\n \t\tmodules[moduleId].call(module.exports, module, module.exports, __webpack_require__);\n\n \t\t// Flag the module as loaded\n \t\tmodule.loaded = true;\n\n \t\t// Return the exports of the module\n \t\treturn module.exports;\n \t}\n\n\n \t// expose the modules object (__webpack_modules__)\n \t__webpack_require__.m = modules;\n\n \t// expose the module cache\n 
\t__webpack_require__.c = installedModules;\n\n \t// __webpack_public_path__\n \t__webpack_require__.p = \"\";\n\n \t// Load entry module and return exports\n \treturn __webpack_require__(0);\n\n\n\n// WEBPACK FOOTER //\n// webpack/bootstrap 0fd5815da764db5fb9fe","/*\n * Copyright 2009-2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE.txt or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\nexports.SourceMapGenerator = require('./lib/source-map-generator').SourceMapGenerator;\nexports.SourceMapConsumer = require('./lib/source-map-consumer').SourceMapConsumer;\nexports.SourceNode = require('./lib/source-node').SourceNode;\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./source-map.js\n// module id = 0\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\nvar base64VLQ = require('./base64-vlq');\nvar util = require('./util');\nvar ArraySet = require('./array-set').ArraySet;\nvar MappingList = require('./mapping-list').MappingList;\n\n/**\n * An instance of the SourceMapGenerator represents a source map which is\n * being built incrementally. You may pass an object with the following\n * properties:\n *\n * - file: The filename of the generated source.\n * - sourceRoot: A root for all relative URLs in this source map.\n */\nfunction SourceMapGenerator(aArgs) {\n if (!aArgs) {\n aArgs = {};\n }\n this._file = util.getArg(aArgs, 'file', null);\n this._sourceRoot = util.getArg(aArgs, 'sourceRoot', null);\n this._skipValidation = util.getArg(aArgs, 'skipValidation', false);\n this._sources = new ArraySet();\n this._names = new ArraySet();\n this._mappings = new MappingList();\n this._sourcesContents = null;\n}\n\nSourceMapGenerator.prototype._version = 3;\n\n/**\n * Creates a new SourceMapGenerator based on a SourceMapConsumer\n *\n * @param aSourceMapConsumer The SourceMap.\n */\nSourceMapGenerator.fromSourceMap =\n function SourceMapGenerator_fromSourceMap(aSourceMapConsumer) {\n var sourceRoot = aSourceMapConsumer.sourceRoot;\n var generator = new SourceMapGenerator({\n file: aSourceMapConsumer.file,\n sourceRoot: sourceRoot\n });\n aSourceMapConsumer.eachMapping(function (mapping) {\n var newMapping = {\n generated: {\n line: mapping.generatedLine,\n column: mapping.generatedColumn\n }\n };\n\n if (mapping.source != null) {\n newMapping.source = mapping.source;\n if (sourceRoot != null) {\n newMapping.source = util.relative(sourceRoot, newMapping.source);\n }\n\n newMapping.original = {\n line: mapping.originalLine,\n column: mapping.originalColumn\n };\n\n if (mapping.name != null) {\n newMapping.name = mapping.name;\n }\n }\n\n generator.addMapping(newMapping);\n });\n aSourceMapConsumer.sources.forEach(function (sourceFile) {\n var sourceRelative = sourceFile;\n if (sourceRoot !== null) {\n sourceRelative = util.relative(sourceRoot, sourceFile);\n }\n\n if (!generator._sources.has(sourceRelative)) {\n generator._sources.add(sourceRelative);\n }\n\n var content = aSourceMapConsumer.sourceContentFor(sourceFile);\n if (content != null) {\n generator.setSourceContent(sourceFile, content);\n }\n });\n return generator;\n };\n\n/**\n * Add a single mapping from original source line and column to the generated\n * source's line and column for this source map being created. 
The mapping\n * object should have the following properties:\n *\n * - generated: An object with the generated line and column positions.\n * - original: An object with the original line and column positions.\n * - source: The original source file (relative to the sourceRoot).\n * - name: An optional original token name for this mapping.\n */\nSourceMapGenerator.prototype.addMapping =\n function SourceMapGenerator_addMapping(aArgs) {\n var generated = util.getArg(aArgs, 'generated');\n var original = util.getArg(aArgs, 'original', null);\n var source = util.getArg(aArgs, 'source', null);\n var name = util.getArg(aArgs, 'name', null);\n\n if (!this._skipValidation) {\n this._validateMapping(generated, original, source, name);\n }\n\n if (source != null) {\n source = String(source);\n if (!this._sources.has(source)) {\n this._sources.add(source);\n }\n }\n\n if (name != null) {\n name = String(name);\n if (!this._names.has(name)) {\n this._names.add(name);\n }\n }\n\n this._mappings.add({\n generatedLine: generated.line,\n generatedColumn: generated.column,\n originalLine: original != null && original.line,\n originalColumn: original != null && original.column,\n source: source,\n name: name\n });\n };\n\n/**\n * Set the source content for a source file.\n */\nSourceMapGenerator.prototype.setSourceContent =\n function SourceMapGenerator_setSourceContent(aSourceFile, aSourceContent) {\n var source = aSourceFile;\n if (this._sourceRoot != null) {\n source = util.relative(this._sourceRoot, source);\n }\n\n if (aSourceContent != null) {\n // Add the source content to the _sourcesContents map.\n // Create a new _sourcesContents map if the property is null.\n if (!this._sourcesContents) {\n this._sourcesContents = Object.create(null);\n }\n this._sourcesContents[util.toSetString(source)] = aSourceContent;\n } else if (this._sourcesContents) {\n // Remove the source file from the _sourcesContents map.\n // If the _sourcesContents map is empty, set the property to null.\n delete this._sourcesContents[util.toSetString(source)];\n if (Object.keys(this._sourcesContents).length === 0) {\n this._sourcesContents = null;\n }\n }\n };\n\n/**\n * Applies the mappings of a sub-source-map for a specific source file to the\n * source map being generated. Each mapping to the supplied source file is\n * rewritten using the supplied source map. Note: The resolution for the\n * resulting mappings is the minimium of this map and the supplied map.\n *\n * @param aSourceMapConsumer The source map to be applied.\n * @param aSourceFile Optional. The filename of the source file.\n * If omitted, SourceMapConsumer's file property will be used.\n * @param aSourceMapPath Optional. The dirname of the path to the source map\n * to be applied. If relative, it is relative to the SourceMapConsumer.\n * This parameter is needed when the two source maps aren't in the same\n * directory, and the source map to be applied contains relative source\n * paths. If so, those relative source paths need to be rewritten\n * relative to the SourceMapGenerator.\n */\nSourceMapGenerator.prototype.applySourceMap =\n function SourceMapGenerator_applySourceMap(aSourceMapConsumer, aSourceFile, aSourceMapPath) {\n var sourceFile = aSourceFile;\n // If aSourceFile is omitted, we will use the file property of the SourceMap\n if (aSourceFile == null) {\n if (aSourceMapConsumer.file == null) {\n throw new Error(\n 'SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, ' +\n 'or the source map\\'s \"file\" property. 
Both were omitted.'\n );\n }\n sourceFile = aSourceMapConsumer.file;\n }\n var sourceRoot = this._sourceRoot;\n // Make \"sourceFile\" relative if an absolute Url is passed.\n if (sourceRoot != null) {\n sourceFile = util.relative(sourceRoot, sourceFile);\n }\n // Applying the SourceMap can add and remove items from the sources and\n // the names array.\n var newSources = new ArraySet();\n var newNames = new ArraySet();\n\n // Find mappings for the \"sourceFile\"\n this._mappings.unsortedForEach(function (mapping) {\n if (mapping.source === sourceFile && mapping.originalLine != null) {\n // Check if it can be mapped by the source map, then update the mapping.\n var original = aSourceMapConsumer.originalPositionFor({\n line: mapping.originalLine,\n column: mapping.originalColumn\n });\n if (original.source != null) {\n // Copy mapping\n mapping.source = original.source;\n if (aSourceMapPath != null) {\n mapping.source = util.join(aSourceMapPath, mapping.source)\n }\n if (sourceRoot != null) {\n mapping.source = util.relative(sourceRoot, mapping.source);\n }\n mapping.originalLine = original.line;\n mapping.originalColumn = original.column;\n if (original.name != null) {\n mapping.name = original.name;\n }\n }\n }\n\n var source = mapping.source;\n if (source != null && !newSources.has(source)) {\n newSources.add(source);\n }\n\n var name = mapping.name;\n if (name != null && !newNames.has(name)) {\n newNames.add(name);\n }\n\n }, this);\n this._sources = newSources;\n this._names = newNames;\n\n // Copy sourcesContents of applied map.\n aSourceMapConsumer.sources.forEach(function (sourceFile) {\n var content = aSourceMapConsumer.sourceContentFor(sourceFile);\n if (content != null) {\n if (aSourceMapPath != null) {\n sourceFile = util.join(aSourceMapPath, sourceFile);\n }\n if (sourceRoot != null) {\n sourceFile = util.relative(sourceRoot, sourceFile);\n }\n this.setSourceContent(sourceFile, content);\n }\n }, this);\n };\n\n/**\n * A mapping can have one of the three levels of data:\n *\n * 1. Just the generated position.\n * 2. The Generated position, original position, and original source.\n * 3. Generated and original position, original source, as well as a name\n * token.\n *\n * To maintain consistency, we validate that any new mapping being added falls\n * in to one of these categories.\n */\nSourceMapGenerator.prototype._validateMapping =\n function SourceMapGenerator_validateMapping(aGenerated, aOriginal, aSource,\n aName) {\n // When aOriginal is truthy but has empty values for .line and .column,\n // it is most likely a programmer error. In this case we throw a very\n // specific error message to try to guide them the right way.\n // For example: https://github.com/Polymer/polymer-bundler/pull/519\n if (aOriginal && typeof aOriginal.line !== 'number' && typeof aOriginal.column !== 'number') {\n throw new Error(\n 'original.line and original.column are not numbers -- you probably meant to omit ' +\n 'the original mapping entirely and only map the generated position. 
If so, pass ' +\n 'null for the original mapping instead of an object with empty or null values.'\n );\n }\n\n if (aGenerated && 'line' in aGenerated && 'column' in aGenerated\n && aGenerated.line > 0 && aGenerated.column >= 0\n && !aOriginal && !aSource && !aName) {\n // Case 1.\n return;\n }\n else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated\n && aOriginal && 'line' in aOriginal && 'column' in aOriginal\n && aGenerated.line > 0 && aGenerated.column >= 0\n && aOriginal.line > 0 && aOriginal.column >= 0\n && aSource) {\n // Cases 2 and 3.\n return;\n }\n else {\n throw new Error('Invalid mapping: ' + JSON.stringify({\n generated: aGenerated,\n source: aSource,\n original: aOriginal,\n name: aName\n }));\n }\n };\n\n/**\n * Serialize the accumulated mappings in to the stream of base 64 VLQs\n * specified by the source map format.\n */\nSourceMapGenerator.prototype._serializeMappings =\n function SourceMapGenerator_serializeMappings() {\n var previousGeneratedColumn = 0;\n var previousGeneratedLine = 1;\n var previousOriginalColumn = 0;\n var previousOriginalLine = 0;\n var previousName = 0;\n var previousSource = 0;\n var result = '';\n var next;\n var mapping;\n var nameIdx;\n var sourceIdx;\n\n var mappings = this._mappings.toArray();\n for (var i = 0, len = mappings.length; i < len; i++) {\n mapping = mappings[i];\n next = ''\n\n if (mapping.generatedLine !== previousGeneratedLine) {\n previousGeneratedColumn = 0;\n while (mapping.generatedLine !== previousGeneratedLine) {\n next += ';';\n previousGeneratedLine++;\n }\n }\n else {\n if (i > 0) {\n if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) {\n continue;\n }\n next += ',';\n }\n }\n\n next += base64VLQ.encode(mapping.generatedColumn\n - previousGeneratedColumn);\n previousGeneratedColumn = mapping.generatedColumn;\n\n if (mapping.source != null) {\n sourceIdx = this._sources.indexOf(mapping.source);\n next += base64VLQ.encode(sourceIdx - previousSource);\n previousSource = sourceIdx;\n\n // lines are stored 0-based in SourceMap spec version 3\n next += base64VLQ.encode(mapping.originalLine - 1\n - previousOriginalLine);\n previousOriginalLine = mapping.originalLine - 1;\n\n next += base64VLQ.encode(mapping.originalColumn\n - previousOriginalColumn);\n previousOriginalColumn = mapping.originalColumn;\n\n if (mapping.name != null) {\n nameIdx = this._names.indexOf(mapping.name);\n next += base64VLQ.encode(nameIdx - previousName);\n previousName = nameIdx;\n }\n }\n\n result += next;\n }\n\n return result;\n };\n\nSourceMapGenerator.prototype._generateSourcesContent =\n function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) {\n return aSources.map(function (source) {\n if (!this._sourcesContents) {\n return null;\n }\n if (aSourceRoot != null) {\n source = util.relative(aSourceRoot, source);\n }\n var key = util.toSetString(source);\n return Object.prototype.hasOwnProperty.call(this._sourcesContents, key)\n ? 
this._sourcesContents[key]\n : null;\n }, this);\n };\n\n/**\n * Externalize the source map.\n */\nSourceMapGenerator.prototype.toJSON =\n function SourceMapGenerator_toJSON() {\n var map = {\n version: this._version,\n sources: this._sources.toArray(),\n names: this._names.toArray(),\n mappings: this._serializeMappings()\n };\n if (this._file != null) {\n map.file = this._file;\n }\n if (this._sourceRoot != null) {\n map.sourceRoot = this._sourceRoot;\n }\n if (this._sourcesContents) {\n map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot);\n }\n\n return map;\n };\n\n/**\n * Render the source map being generated to a string.\n */\nSourceMapGenerator.prototype.toString =\n function SourceMapGenerator_toString() {\n return JSON.stringify(this.toJSON());\n };\n\nexports.SourceMapGenerator = SourceMapGenerator;\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/source-map-generator.js\n// module id = 1\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n *\n * Based on the Base 64 VLQ implementation in Closure Compiler:\n * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java\n *\n * Copyright 2011 The Closure Compiler Authors. All rights reserved.\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n * copyright notice, this list of conditions and the following\n * disclaimer in the documentation and/or other materials provided\n * with the distribution.\n * * Neither the name of Google Inc. nor the names of its\n * contributors may be used to endorse or promote products derived\n * from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n\nvar base64 = require('./base64');\n\n// A single base 64 digit can contain 6 bits of data. For the base 64 variable\n// length quantities we use in the source map spec, the first bit is the sign,\n// the next four bits are the actual value, and the 6th bit is the\n// continuation bit. 
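// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): a minimal round
// trip through the generator serialized above. It assumes the published
// 'source-map' entry point; the file names, positions, and the 'greet' name
// are invented for illustration. In this bundled version the consumer
// constructor is synchronous.
var sourceMap = require('source-map');

var generator = new sourceMap.SourceMapGenerator({ file: 'min.js' });
generator.addMapping({
  generated: { line: 1, column: 4 },
  original:  { line: 2, column: 2 },
  source: 'one.js',
  name: 'greet'
});
generator.setSourceContent('one.js', 'function greet() {\n  return "hi";\n}\n');

// toString() goes through toJSON()/_serializeMappings() shown above.
var raw = generator.toString();

// Feeding the JSON back into a consumer answers position queries.
var consumer = new sourceMap.SourceMapConsumer(raw);
console.log(consumer.originalPositionFor({ line: 1, column: 4 }));
// Expected, roughly: { source: 'one.js', line: 2, column: 2, name: 'greet' }
// ---------------------------------------------------------------------------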
The continuation bit tells us whether there are more\n// digits in this value following this digit.\n//\n// Continuation\n// | Sign\n// | |\n// V V\n// 101011\n\nvar VLQ_BASE_SHIFT = 5;\n\n// binary: 100000\nvar VLQ_BASE = 1 << VLQ_BASE_SHIFT;\n\n// binary: 011111\nvar VLQ_BASE_MASK = VLQ_BASE - 1;\n\n// binary: 100000\nvar VLQ_CONTINUATION_BIT = VLQ_BASE;\n\n/**\n * Converts from a two-complement value to a value where the sign bit is\n * placed in the least significant bit. For example, as decimals:\n * 1 becomes 2 (10 binary), -1 becomes 3 (11 binary)\n * 2 becomes 4 (100 binary), -2 becomes 5 (101 binary)\n */\nfunction toVLQSigned(aValue) {\n return aValue < 0\n ? ((-aValue) << 1) + 1\n : (aValue << 1) + 0;\n}\n\n/**\n * Converts to a two-complement value from a value where the sign bit is\n * placed in the least significant bit. For example, as decimals:\n * 2 (10 binary) becomes 1, 3 (11 binary) becomes -1\n * 4 (100 binary) becomes 2, 5 (101 binary) becomes -2\n */\nfunction fromVLQSigned(aValue) {\n var isNegative = (aValue & 1) === 1;\n var shifted = aValue >> 1;\n return isNegative\n ? -shifted\n : shifted;\n}\n\n/**\n * Returns the base 64 VLQ encoded value.\n */\nexports.encode = function base64VLQ_encode(aValue) {\n var encoded = \"\";\n var digit;\n\n var vlq = toVLQSigned(aValue);\n\n do {\n digit = vlq & VLQ_BASE_MASK;\n vlq >>>= VLQ_BASE_SHIFT;\n if (vlq > 0) {\n // There are still more digits in this value, so we must make sure the\n // continuation bit is marked.\n digit |= VLQ_CONTINUATION_BIT;\n }\n encoded += base64.encode(digit);\n } while (vlq > 0);\n\n return encoded;\n};\n\n/**\n * Decodes the next base 64 VLQ value from the given string and returns the\n * value and the rest of the string via the out parameter.\n */\nexports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) {\n var strLen = aStr.length;\n var result = 0;\n var shift = 0;\n var continuation, digit;\n\n do {\n if (aIndex >= strLen) {\n throw new Error(\"Expected more digits in base 64 VLQ value.\");\n }\n\n digit = base64.decode(aStr.charCodeAt(aIndex++));\n if (digit === -1) {\n throw new Error(\"Invalid base64 digit: \" + aStr.charAt(aIndex - 1));\n }\n\n continuation = !!(digit & VLQ_CONTINUATION_BIT);\n digit &= VLQ_BASE_MASK;\n result = result + (digit << shift);\n shift += VLQ_BASE_SHIFT;\n } while (continuation);\n\n aOutParam.value = fromVLQSigned(result);\n aOutParam.rest = aIndex;\n};\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/base64-vlq.js\n// module id = 2\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\nvar intToCharMap = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split('');\n\n/**\n * Encode an integer in the range of 0 to 63 to a single base 64 digit.\n */\nexports.encode = function (number) {\n if (0 <= number && number < intToCharMap.length) {\n return intToCharMap[number];\n }\n throw new TypeError(\"Must be between 0 and 63: \" + number);\n};\n\n/**\n * Decode a single base 64 character code digit to an integer. 
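// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): exercising the
// VLQ encoder/decoder defined above. The require path assumes the published
// package layout; inside this bundle the module is simply './base64-vlq'.
var base64VLQ = require('source-map/lib/base64-vlq');

// encode() zig-zags the sign into the lowest bit and emits five value bits per
// base 64 digit, setting the continuation bit while more digits follow.
base64VLQ.encode(16);  // 'gB'  (16 -> 32, needs a continuation digit)
base64VLQ.encode(-1);  // 'D'   (-1 -> 3)
base64VLQ.encode(0);   // 'A'

// decode() reads one value starting at the given index and reports where it
// stopped via the out parameter, so callers can walk a whole mappings segment.
var out = {};
base64VLQ.decode('gBD', 0, out);
console.log(out.value, out.rest); // 16, 2
base64VLQ.decode('gBD', out.rest, out);
console.log(out.value, out.rest); // -1, 3
// ---------------------------------------------------------------------------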
Returns -1 on\n * failure.\n */\nexports.decode = function (charCode) {\n var bigA = 65; // 'A'\n var bigZ = 90; // 'Z'\n\n var littleA = 97; // 'a'\n var littleZ = 122; // 'z'\n\n var zero = 48; // '0'\n var nine = 57; // '9'\n\n var plus = 43; // '+'\n var slash = 47; // '/'\n\n var littleOffset = 26;\n var numberOffset = 52;\n\n // 0 - 25: ABCDEFGHIJKLMNOPQRSTUVWXYZ\n if (bigA <= charCode && charCode <= bigZ) {\n return (charCode - bigA);\n }\n\n // 26 - 51: abcdefghijklmnopqrstuvwxyz\n if (littleA <= charCode && charCode <= littleZ) {\n return (charCode - littleA + littleOffset);\n }\n\n // 52 - 61: 0123456789\n if (zero <= charCode && charCode <= nine) {\n return (charCode - zero + numberOffset);\n }\n\n // 62: +\n if (charCode == plus) {\n return 62;\n }\n\n // 63: /\n if (charCode == slash) {\n return 63;\n }\n\n // Invalid base64 digit.\n return -1;\n};\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/base64.js\n// module id = 3\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\n/**\n * This is a helper function for getting values from parameter/options\n * objects.\n *\n * @param args The object we are extracting values from\n * @param name The name of the property we are getting.\n * @param defaultValue An optional value to return if the property is missing\n * from the object. If this is not specified and the property is missing, an\n * error will be thrown.\n */\nfunction getArg(aArgs, aName, aDefaultValue) {\n if (aName in aArgs) {\n return aArgs[aName];\n } else if (arguments.length === 3) {\n return aDefaultValue;\n } else {\n throw new Error('\"' + aName + '\" is a required argument.');\n }\n}\nexports.getArg = getArg;\n\nvar urlRegexp = /^(?:([\\w+\\-.]+):)?\\/\\/(?:(\\w+:\\w+)@)?([\\w.-]*)(?::(\\d+))?(.*)$/;\nvar dataUrlRegexp = /^data:.+\\,.+$/;\n\nfunction urlParse(aUrl) {\n var match = aUrl.match(urlRegexp);\n if (!match) {\n return null;\n }\n return {\n scheme: match[1],\n auth: match[2],\n host: match[3],\n port: match[4],\n path: match[5]\n };\n}\nexports.urlParse = urlParse;\n\nfunction urlGenerate(aParsedUrl) {\n var url = '';\n if (aParsedUrl.scheme) {\n url += aParsedUrl.scheme + ':';\n }\n url += '//';\n if (aParsedUrl.auth) {\n url += aParsedUrl.auth + '@';\n }\n if (aParsedUrl.host) {\n url += aParsedUrl.host;\n }\n if (aParsedUrl.port) {\n url += \":\" + aParsedUrl.port\n }\n if (aParsedUrl.path) {\n url += aParsedUrl.path;\n }\n return url;\n}\nexports.urlGenerate = urlGenerate;\n\n/**\n * Normalizes a path, or the path portion of a URL:\n *\n * - Replaces consecutive slashes with one slash.\n * - Removes unnecessary '.' parts.\n * - Removes unnecessary '/..' parts.\n *\n * Based on code in the Node.js 'path' core module.\n *\n * @param aPath The path or url to normalize.\n */\nfunction normalize(aPath) {\n var path = aPath;\n var url = urlParse(aPath);\n if (url) {\n if (!url.path) {\n return aPath;\n }\n path = url.path;\n }\n var isAbsolute = exports.isAbsolute(path);\n\n var parts = path.split(/\\/+/);\n for (var part, up = 0, i = parts.length - 1; i >= 0; i--) {\n part = parts[i];\n if (part === '.') {\n parts.splice(i, 1);\n } else if (part === '..') {\n up++;\n } else if (up > 0) {\n if (part === '') {\n // The first part is blank if the path is absolute. Trying to go\n // above the root is a no-op. Therefore we can remove all '..' 
parts\n // directly after the root.\n parts.splice(i + 1, up);\n up = 0;\n } else {\n parts.splice(i, 2);\n up--;\n }\n }\n }\n path = parts.join('/');\n\n if (path === '') {\n path = isAbsolute ? '/' : '.';\n }\n\n if (url) {\n url.path = path;\n return urlGenerate(url);\n }\n return path;\n}\nexports.normalize = normalize;\n\n/**\n * Joins two paths/URLs.\n *\n * @param aRoot The root path or URL.\n * @param aPath The path or URL to be joined with the root.\n *\n * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a\n * scheme-relative URL: Then the scheme of aRoot, if any, is prepended\n * first.\n * - Otherwise aPath is a path. If aRoot is a URL, then its path portion\n * is updated with the result and aRoot is returned. Otherwise the result\n * is returned.\n * - If aPath is absolute, the result is aPath.\n * - Otherwise the two paths are joined with a slash.\n * - Joining for example 'http://' and 'www.example.com' is also supported.\n */\nfunction join(aRoot, aPath) {\n if (aRoot === \"\") {\n aRoot = \".\";\n }\n if (aPath === \"\") {\n aPath = \".\";\n }\n var aPathUrl = urlParse(aPath);\n var aRootUrl = urlParse(aRoot);\n if (aRootUrl) {\n aRoot = aRootUrl.path || '/';\n }\n\n // `join(foo, '//www.example.org')`\n if (aPathUrl && !aPathUrl.scheme) {\n if (aRootUrl) {\n aPathUrl.scheme = aRootUrl.scheme;\n }\n return urlGenerate(aPathUrl);\n }\n\n if (aPathUrl || aPath.match(dataUrlRegexp)) {\n return aPath;\n }\n\n // `join('http://', 'www.example.com')`\n if (aRootUrl && !aRootUrl.host && !aRootUrl.path) {\n aRootUrl.host = aPath;\n return urlGenerate(aRootUrl);\n }\n\n var joined = aPath.charAt(0) === '/'\n ? aPath\n : normalize(aRoot.replace(/\\/+$/, '') + '/' + aPath);\n\n if (aRootUrl) {\n aRootUrl.path = joined;\n return urlGenerate(aRootUrl);\n }\n return joined;\n}\nexports.join = join;\n\nexports.isAbsolute = function (aPath) {\n return aPath.charAt(0) === '/' || urlRegexp.test(aPath);\n};\n\n/**\n * Make a path relative to a URL or another path.\n *\n * @param aRoot The root path or URL.\n * @param aPath The path or URL to be made relative to aRoot.\n */\nfunction relative(aRoot, aPath) {\n if (aRoot === \"\") {\n aRoot = \".\";\n }\n\n aRoot = aRoot.replace(/\\/$/, '');\n\n // It is possible for the path to be above the root. In this case, simply\n // checking whether the root is a prefix of the path won't work. Instead, we\n // need to remove components from the root one by one, until either we find\n // a prefix that fits, or we run out of components to remove.\n var level = 0;\n while (aPath.indexOf(aRoot + '/') !== 0) {\n var index = aRoot.lastIndexOf(\"/\");\n if (index < 0) {\n return aPath;\n }\n\n // If the only part of the root that is left is the scheme (i.e. 
http://,\n // file:///, etc.), one or more slashes (/), or simply nothing at all, we\n // have exhausted all components, so the path is not relative to the root.\n aRoot = aRoot.slice(0, index);\n if (aRoot.match(/^([^\\/]+:\\/)?\\/*$/)) {\n return aPath;\n }\n\n ++level;\n }\n\n // Make sure we add a \"../\" for each component we removed from the root.\n return Array(level + 1).join(\"../\") + aPath.substr(aRoot.length + 1);\n}\nexports.relative = relative;\n\nvar supportsNullProto = (function () {\n var obj = Object.create(null);\n return !('__proto__' in obj);\n}());\n\nfunction identity (s) {\n return s;\n}\n\n/**\n * Because behavior goes wacky when you set `__proto__` on objects, we\n * have to prefix all the strings in our set with an arbitrary character.\n *\n * See https://github.com/mozilla/source-map/pull/31 and\n * https://github.com/mozilla/source-map/issues/30\n *\n * @param String aStr\n */\nfunction toSetString(aStr) {\n if (isProtoString(aStr)) {\n return '$' + aStr;\n }\n\n return aStr;\n}\nexports.toSetString = supportsNullProto ? identity : toSetString;\n\nfunction fromSetString(aStr) {\n if (isProtoString(aStr)) {\n return aStr.slice(1);\n }\n\n return aStr;\n}\nexports.fromSetString = supportsNullProto ? identity : fromSetString;\n\nfunction isProtoString(s) {\n if (!s) {\n return false;\n }\n\n var length = s.length;\n\n if (length < 9 /* \"__proto__\".length */) {\n return false;\n }\n\n if (s.charCodeAt(length - 1) !== 95 /* '_' */ ||\n s.charCodeAt(length - 2) !== 95 /* '_' */ ||\n s.charCodeAt(length - 3) !== 111 /* 'o' */ ||\n s.charCodeAt(length - 4) !== 116 /* 't' */ ||\n s.charCodeAt(length - 5) !== 111 /* 'o' */ ||\n s.charCodeAt(length - 6) !== 114 /* 'r' */ ||\n s.charCodeAt(length - 7) !== 112 /* 'p' */ ||\n s.charCodeAt(length - 8) !== 95 /* '_' */ ||\n s.charCodeAt(length - 9) !== 95 /* '_' */) {\n return false;\n }\n\n for (var i = length - 10; i >= 0; i--) {\n if (s.charCodeAt(i) !== 36 /* '$' */) {\n return false;\n }\n }\n\n return true;\n}\n\n/**\n * Comparator between two mappings where the original positions are compared.\n *\n * Optionally pass in `true` as `onlyCompareGenerated` to consider two\n * mappings with the same original source/line/column, but different generated\n * line and column the same. Useful when searching for a mapping with a\n * stubbed out mapping.\n */\nfunction compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) {\n var cmp = strcmp(mappingA.source, mappingB.source);\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.originalLine - mappingB.originalLine;\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.originalColumn - mappingB.originalColumn;\n if (cmp !== 0 || onlyCompareOriginal) {\n return cmp;\n }\n\n cmp = mappingA.generatedColumn - mappingB.generatedColumn;\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.generatedLine - mappingB.generatedLine;\n if (cmp !== 0) {\n return cmp;\n }\n\n return strcmp(mappingA.name, mappingB.name);\n}\nexports.compareByOriginalPositions = compareByOriginalPositions;\n\n/**\n * Comparator between two mappings with deflated source and name indices where\n * the generated positions are compared.\n *\n * Optionally pass in `true` as `onlyCompareGenerated` to consider two\n * mappings with the same generated line and column, but different\n * source/name/original line and column the same. 
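// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): how the
// comparator above orders mapping records. The require path and the mapping
// values are assumptions made for the example.
var util = require('source-map/lib/util');

var mappings = [
  { source: 'b.js', originalLine: 1, originalColumn: 0, generatedLine: 1, generatedColumn: 9, name: null },
  { source: 'a.js', originalLine: 2, originalColumn: 4, generatedLine: 1, generatedColumn: 0, name: null },
  { source: 'a.js', originalLine: 1, originalColumn: 0, generatedLine: 2, generatedColumn: 0, name: null }
];

// The primary key is the source, then original line/column, and only then the
// generated position, so every a.js mapping sorts before any b.js mapping.
mappings.sort(util.compareByOriginalPositions);
console.log(mappings.map(function (m) { return m.source + ':' + m.originalLine; }));
// [ 'a.js:1', 'a.js:2', 'b.js:1' ]
// ---------------------------------------------------------------------------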
Useful when searching for a\n * mapping with a stubbed out mapping.\n */\nfunction compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) {\n var cmp = mappingA.generatedLine - mappingB.generatedLine;\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.generatedColumn - mappingB.generatedColumn;\n if (cmp !== 0 || onlyCompareGenerated) {\n return cmp;\n }\n\n cmp = strcmp(mappingA.source, mappingB.source);\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.originalLine - mappingB.originalLine;\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.originalColumn - mappingB.originalColumn;\n if (cmp !== 0) {\n return cmp;\n }\n\n return strcmp(mappingA.name, mappingB.name);\n}\nexports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated;\n\nfunction strcmp(aStr1, aStr2) {\n if (aStr1 === aStr2) {\n return 0;\n }\n\n if (aStr1 === null) {\n return 1; // aStr2 !== null\n }\n\n if (aStr2 === null) {\n return -1; // aStr1 !== null\n }\n\n if (aStr1 > aStr2) {\n return 1;\n }\n\n return -1;\n}\n\n/**\n * Comparator between two mappings with inflated source and name strings where\n * the generated positions are compared.\n */\nfunction compareByGeneratedPositionsInflated(mappingA, mappingB) {\n var cmp = mappingA.generatedLine - mappingB.generatedLine;\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.generatedColumn - mappingB.generatedColumn;\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = strcmp(mappingA.source, mappingB.source);\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.originalLine - mappingB.originalLine;\n if (cmp !== 0) {\n return cmp;\n }\n\n cmp = mappingA.originalColumn - mappingB.originalColumn;\n if (cmp !== 0) {\n return cmp;\n }\n\n return strcmp(mappingA.name, mappingB.name);\n}\nexports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated;\n\n/**\n * Strip any JSON XSSI avoidance prefix from the string (as documented\n * in the source maps specification), and then parse the string as\n * JSON.\n */\nfunction parseSourceMapInput(str) {\n return JSON.parse(str.replace(/^\\)]}'[^\\n]*\\n/, ''));\n}\nexports.parseSourceMapInput = parseSourceMapInput;\n\n/**\n * Compute the URL of a source given the the source root, the source's\n * URL, and the source map's URL.\n */\nfunction computeSourceURL(sourceRoot, sourceURL, sourceMapURL) {\n sourceURL = sourceURL || '';\n\n if (sourceRoot) {\n // This follows what Chrome does.\n if (sourceRoot[sourceRoot.length - 1] !== '/' && sourceURL[0] !== '/') {\n sourceRoot += '/';\n }\n // The spec says:\n // Line 4: An optional source root, useful for relocating source\n // files on a server or removing repeated values in the\n // “sources” entry. This value is prepended to the individual\n // entries in the “source” field.\n sourceURL = sourceRoot + sourceURL;\n }\n\n // Historically, SourceMapConsumer did not take the sourceMapURL as\n // a parameter. This mode is still somewhat supported, which is why\n // this code block is conditional. However, it's preferable to pass\n // the source map URL to SourceMapConsumer, so that this function\n // can implement the source URL resolution algorithm as outlined in\n // the spec. This block is basically the equivalent of:\n // new URL(sourceURL, sourceMapURL).toString()\n // ... 
except it avoids using URL, which wasn't available in the\n // older releases of node still supported by this library.\n //\n // The spec says:\n // If the sources are not absolute URLs after prepending of the\n // “sourceRoot”, the sources are resolved relative to the\n // SourceMap (like resolving script src in a html document).\n if (sourceMapURL) {\n var parsed = urlParse(sourceMapURL);\n if (!parsed) {\n throw new Error(\"sourceMapURL could not be parsed\");\n }\n if (parsed.path) {\n // Strip the last path component, but keep the \"/\".\n var index = parsed.path.lastIndexOf('/');\n if (index >= 0) {\n parsed.path = parsed.path.substring(0, index + 1);\n }\n }\n sourceURL = join(urlGenerate(parsed), sourceURL);\n }\n\n return normalize(sourceURL);\n}\nexports.computeSourceURL = computeSourceURL;\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/util.js\n// module id = 4\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\nvar util = require('./util');\nvar has = Object.prototype.hasOwnProperty;\nvar hasNativeMap = typeof Map !== \"undefined\";\n\n/**\n * A data structure which is a combination of an array and a set. Adding a new\n * member is O(1), testing for membership is O(1), and finding the index of an\n * element is O(1). Removing elements from the set is not supported. Only\n * strings are supported for membership.\n */\nfunction ArraySet() {\n this._array = [];\n this._set = hasNativeMap ? new Map() : Object.create(null);\n}\n\n/**\n * Static method for creating ArraySet instances from an existing array.\n */\nArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) {\n var set = new ArraySet();\n for (var i = 0, len = aArray.length; i < len; i++) {\n set.add(aArray[i], aAllowDuplicates);\n }\n return set;\n};\n\n/**\n * Return how many unique items are in this ArraySet. If duplicates have been\n * added, than those do not count towards the size.\n *\n * @returns Number\n */\nArraySet.prototype.size = function ArraySet_size() {\n return hasNativeMap ? this._set.size : Object.getOwnPropertyNames(this._set).length;\n};\n\n/**\n * Add the given string to this set.\n *\n * @param String aStr\n */\nArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) {\n var sStr = hasNativeMap ? aStr : util.toSetString(aStr);\n var isDuplicate = hasNativeMap ? 
this.has(aStr) : has.call(this._set, sStr);\n var idx = this._array.length;\n if (!isDuplicate || aAllowDuplicates) {\n this._array.push(aStr);\n }\n if (!isDuplicate) {\n if (hasNativeMap) {\n this._set.set(aStr, idx);\n } else {\n this._set[sStr] = idx;\n }\n }\n};\n\n/**\n * Is the given string a member of this set?\n *\n * @param String aStr\n */\nArraySet.prototype.has = function ArraySet_has(aStr) {\n if (hasNativeMap) {\n return this._set.has(aStr);\n } else {\n var sStr = util.toSetString(aStr);\n return has.call(this._set, sStr);\n }\n};\n\n/**\n * What is the index of the given string in the array?\n *\n * @param String aStr\n */\nArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) {\n if (hasNativeMap) {\n var idx = this._set.get(aStr);\n if (idx >= 0) {\n return idx;\n }\n } else {\n var sStr = util.toSetString(aStr);\n if (has.call(this._set, sStr)) {\n return this._set[sStr];\n }\n }\n\n throw new Error('\"' + aStr + '\" is not in the set.');\n};\n\n/**\n * What is the element at the given index?\n *\n * @param Number aIdx\n */\nArraySet.prototype.at = function ArraySet_at(aIdx) {\n if (aIdx >= 0 && aIdx < this._array.length) {\n return this._array[aIdx];\n }\n throw new Error('No element indexed by ' + aIdx);\n};\n\n/**\n * Returns the array representation of this set (which has the proper indices\n * indicated by indexOf). Note that this is a copy of the internal array used\n * for storing the members so that no one can mess with internal state.\n */\nArraySet.prototype.toArray = function ArraySet_toArray() {\n return this._array.slice();\n};\n\nexports.ArraySet = ArraySet;\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/array-set.js\n// module id = 5\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2014 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\nvar util = require('./util');\n\n/**\n * Determine whether mappingB is after mappingA with respect to generated\n * position.\n */\nfunction generatedPositionAfter(mappingA, mappingB) {\n // Optimized for most common case\n var lineA = mappingA.generatedLine;\n var lineB = mappingB.generatedLine;\n var columnA = mappingA.generatedColumn;\n var columnB = mappingB.generatedColumn;\n return lineB > lineA || lineB == lineA && columnB >= columnA ||\n util.compareByGeneratedPositionsInflated(mappingA, mappingB) <= 0;\n}\n\n/**\n * A data structure to provide a sorted view of accumulated mappings in a\n * performance conscious manner. It trades a neglibable overhead in general\n * case for a large speedup in case of mappings being added in order.\n */\nfunction MappingList() {\n this._array = [];\n this._sorted = true;\n // Serves as infimum\n this._last = {generatedLine: -1, generatedColumn: 0};\n}\n\n/**\n * Iterate through internal items. 
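// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): typical ArraySet
// usage, mirroring how the consumer stores its "sources" and "names". The
// require path and the file names are assumptions.
var ArraySet = require('source-map/lib/array-set').ArraySet;

var sources = new ArraySet();
sources.add('one.js');
sources.add('two.js');
sources.add('one.js');      // already a member; ignored without aAllowDuplicates

sources.has('two.js');      // true
sources.indexOf('two.js');  // 1, the stable index used when encoding VLQ deltas
sources.at(0);              // 'one.js'
sources.toArray();          // [ 'one.js', 'two.js' ] (a defensive copy)

// Passing `true` keeps duplicate entries in the array while the set remembers
// only the first index, which is how maps with repeated names are tolerated.
var names = ArraySet.fromArray(['a', 'a', 'b'], true);
names.size();               // 2 unique members
// ---------------------------------------------------------------------------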
This method takes the same arguments that\n * `Array.prototype.forEach` takes.\n *\n * NOTE: The order of the mappings is NOT guaranteed.\n */\nMappingList.prototype.unsortedForEach =\n function MappingList_forEach(aCallback, aThisArg) {\n this._array.forEach(aCallback, aThisArg);\n };\n\n/**\n * Add the given source mapping.\n *\n * @param Object aMapping\n */\nMappingList.prototype.add = function MappingList_add(aMapping) {\n if (generatedPositionAfter(this._last, aMapping)) {\n this._last = aMapping;\n this._array.push(aMapping);\n } else {\n this._sorted = false;\n this._array.push(aMapping);\n }\n};\n\n/**\n * Returns the flat, sorted array of mappings. The mappings are sorted by\n * generated position.\n *\n * WARNING: This method returns internal data without copying, for\n * performance. The return value must NOT be mutated, and should be treated as\n * an immutable borrow. If you want to take ownership, you must make your own\n * copy.\n */\nMappingList.prototype.toArray = function MappingList_toArray() {\n if (!this._sorted) {\n this._array.sort(util.compareByGeneratedPositionsInflated);\n this._sorted = true;\n }\n return this._array;\n};\n\nexports.MappingList = MappingList;\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/mapping-list.js\n// module id = 6\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\nvar util = require('./util');\nvar binarySearch = require('./binary-search');\nvar ArraySet = require('./array-set').ArraySet;\nvar base64VLQ = require('./base64-vlq');\nvar quickSort = require('./quick-sort').quickSort;\n\nfunction SourceMapConsumer(aSourceMap, aSourceMapURL) {\n var sourceMap = aSourceMap;\n if (typeof aSourceMap === 'string') {\n sourceMap = util.parseSourceMapInput(aSourceMap);\n }\n\n return sourceMap.sections != null\n ? new IndexedSourceMapConsumer(sourceMap, aSourceMapURL)\n : new BasicSourceMapConsumer(sourceMap, aSourceMapURL);\n}\n\nSourceMapConsumer.fromSourceMap = function(aSourceMap, aSourceMapURL) {\n return BasicSourceMapConsumer.fromSourceMap(aSourceMap, aSourceMapURL);\n}\n\n/**\n * The version of the source mapping spec that we are consuming.\n */\nSourceMapConsumer.prototype._version = 3;\n\n// `__generatedMappings` and `__originalMappings` are arrays that hold the\n// parsed mapping coordinates from the source map's \"mappings\" attribute. They\n// are lazily instantiated, accessed via the `_generatedMappings` and\n// `_originalMappings` getters respectively, and we only parse the mappings\n// and create these arrays once queried for a source location. 
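// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): MappingList only
// pays for a sort when something was added out of generated order. The
// require path is an assumption; the mappings are trimmed to the fields the
// comparator needs for positions that never tie (ties would also consult
// source, original position, and name).
var MappingList = require('source-map/lib/mapping-list').MappingList;

var list = new MappingList();
list.add({ generatedLine: 1, generatedColumn: 0 });
list.add({ generatedLine: 2, generatedColumn: 0 }); // still in order, no dirty flag

// An earlier position than the last one flips the dirty flag...
list.add({ generatedLine: 1, generatedColumn: 5 });

// ...and toArray() sorts once, lazily.
list.toArray().map(function (m) { return m.generatedLine + ':' + m.generatedColumn; });
// [ '1:0', '1:5', '2:0' ]
// ---------------------------------------------------------------------------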
We jump through\n// these hoops because there can be many thousands of mappings, and parsing\n// them is expensive, so we only want to do it if we must.\n//\n// Each object in the arrays is of the form:\n//\n// {\n// generatedLine: The line number in the generated code,\n// generatedColumn: The column number in the generated code,\n// source: The path to the original source file that generated this\n// chunk of code,\n// originalLine: The line number in the original source that\n// corresponds to this chunk of generated code,\n// originalColumn: The column number in the original source that\n// corresponds to this chunk of generated code,\n// name: The name of the original symbol which generated this chunk of\n// code.\n// }\n//\n// All properties except for `generatedLine` and `generatedColumn` can be\n// `null`.\n//\n// `_generatedMappings` is ordered by the generated positions.\n//\n// `_originalMappings` is ordered by the original positions.\n\nSourceMapConsumer.prototype.__generatedMappings = null;\nObject.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', {\n configurable: true,\n enumerable: true,\n get: function () {\n if (!this.__generatedMappings) {\n this._parseMappings(this._mappings, this.sourceRoot);\n }\n\n return this.__generatedMappings;\n }\n});\n\nSourceMapConsumer.prototype.__originalMappings = null;\nObject.defineProperty(SourceMapConsumer.prototype, '_originalMappings', {\n configurable: true,\n enumerable: true,\n get: function () {\n if (!this.__originalMappings) {\n this._parseMappings(this._mappings, this.sourceRoot);\n }\n\n return this.__originalMappings;\n }\n});\n\nSourceMapConsumer.prototype._charIsMappingSeparator =\n function SourceMapConsumer_charIsMappingSeparator(aStr, index) {\n var c = aStr.charAt(index);\n return c === \";\" || c === \",\";\n };\n\n/**\n * Parse the mappings in a string in to a data structure which we can easily\n * query (the ordered arrays in the `this.__generatedMappings` and\n * `this.__originalMappings` properties).\n */\nSourceMapConsumer.prototype._parseMappings =\n function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {\n throw new Error(\"Subclasses must implement _parseMappings\");\n };\n\nSourceMapConsumer.GENERATED_ORDER = 1;\nSourceMapConsumer.ORIGINAL_ORDER = 2;\n\nSourceMapConsumer.GREATEST_LOWER_BOUND = 1;\nSourceMapConsumer.LEAST_UPPER_BOUND = 2;\n\n/**\n * Iterate over each mapping between an original source/line/column and a\n * generated line/column in this source map.\n *\n * @param Function aCallback\n * The function that is called with each mapping.\n * @param Object aContext\n * Optional. If specified, this object will be the value of `this` every\n * time that `aCallback` is called.\n * @param aOrder\n * Either `SourceMapConsumer.GENERATED_ORDER` or\n * `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to\n * iterate over the mappings sorted by the generated file's line/column\n * order or the original's source/line/column order, respectively. 
Defaults to\n * `SourceMapConsumer.GENERATED_ORDER`.\n */\nSourceMapConsumer.prototype.eachMapping =\n function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) {\n var context = aContext || null;\n var order = aOrder || SourceMapConsumer.GENERATED_ORDER;\n\n var mappings;\n switch (order) {\n case SourceMapConsumer.GENERATED_ORDER:\n mappings = this._generatedMappings;\n break;\n case SourceMapConsumer.ORIGINAL_ORDER:\n mappings = this._originalMappings;\n break;\n default:\n throw new Error(\"Unknown order of iteration.\");\n }\n\n var sourceRoot = this.sourceRoot;\n mappings.map(function (mapping) {\n var source = mapping.source === null ? null : this._sources.at(mapping.source);\n source = util.computeSourceURL(sourceRoot, source, this._sourceMapURL);\n return {\n source: source,\n generatedLine: mapping.generatedLine,\n generatedColumn: mapping.generatedColumn,\n originalLine: mapping.originalLine,\n originalColumn: mapping.originalColumn,\n name: mapping.name === null ? null : this._names.at(mapping.name)\n };\n }, this).forEach(aCallback, context);\n };\n\n/**\n * Returns all generated line and column information for the original source,\n * line, and column provided. If no column is provided, returns all mappings\n * corresponding to a either the line we are searching for or the next\n * closest line that has any mappings. Otherwise, returns all mappings\n * corresponding to the given line and either the column we are searching for\n * or the next closest column that has any offsets.\n *\n * The only argument is an object with the following properties:\n *\n * - source: The filename of the original source.\n * - line: The line number in the original source. The line number is 1-based.\n * - column: Optional. the column number in the original source.\n * The column number is 0-based.\n *\n * and an array of objects is returned, each with the following properties:\n *\n * - line: The line number in the generated source, or null. The\n * line number is 1-based.\n * - column: The column number in the generated source, or null.\n * The column number is 0-based.\n */\nSourceMapConsumer.prototype.allGeneratedPositionsFor =\n function SourceMapConsumer_allGeneratedPositionsFor(aArgs) {\n var line = util.getArg(aArgs, 'line');\n\n // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping\n // returns the index of the closest mapping less than the needle. By\n // setting needle.originalColumn to 0, we thus find the last mapping for\n // the given line, provided such a mapping exists.\n var needle = {\n source: util.getArg(aArgs, 'source'),\n originalLine: line,\n originalColumn: util.getArg(aArgs, 'column', 0)\n };\n\n needle.source = this._findSourceIndex(needle.source);\n if (needle.source < 0) {\n return [];\n }\n\n var mappings = [];\n\n var index = this._findMapping(needle,\n this._originalMappings,\n \"originalLine\",\n \"originalColumn\",\n util.compareByOriginalPositions,\n binarySearch.LEAST_UPPER_BOUND);\n if (index >= 0) {\n var mapping = this._originalMappings[index];\n\n if (aArgs.column === undefined) {\n var originalLine = mapping.originalLine;\n\n // Iterate until either we run out of mappings, or we run into\n // a mapping for a different line than the one we found. 
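// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): iterating a
// consumer's mappings in both orders. The tiny raw map is hand-written for
// the example; 'AAAAA' encodes generated 1:0 -> one.js 1:0, name 'greet'.
var SourceMapConsumer = require('source-map').SourceMapConsumer;

var consumer = new SourceMapConsumer({
  version: 3,
  file: 'min.js',
  sources: ['one.js'],
  names: ['greet'],
  mappings: 'AAAAA'
});

// Generated order (the default) walks the minified file top to bottom.
consumer.eachMapping(function (m) {
  console.log(m.generatedLine + ':' + m.generatedColumn,
              '<-', m.source + ':' + m.originalLine + ':' + m.originalColumn, m.name);
});

// The optional context becomes `this` inside the callback.
var stats = { count: 0 };
consumer.eachMapping(function () { this.count++; }, stats,
                     SourceMapConsumer.ORIGINAL_ORDER);
console.log(stats.count); // 1 mapping in this tiny map
// ---------------------------------------------------------------------------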
Since\n // mappings are sorted, this is guaranteed to find all mappings for\n // the line we found.\n while (mapping && mapping.originalLine === originalLine) {\n mappings.push({\n line: util.getArg(mapping, 'generatedLine', null),\n column: util.getArg(mapping, 'generatedColumn', null),\n lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)\n });\n\n mapping = this._originalMappings[++index];\n }\n } else {\n var originalColumn = mapping.originalColumn;\n\n // Iterate until either we run out of mappings, or we run into\n // a mapping for a different line than the one we were searching for.\n // Since mappings are sorted, this is guaranteed to find all mappings for\n // the line we are searching for.\n while (mapping &&\n mapping.originalLine === line &&\n mapping.originalColumn == originalColumn) {\n mappings.push({\n line: util.getArg(mapping, 'generatedLine', null),\n column: util.getArg(mapping, 'generatedColumn', null),\n lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)\n });\n\n mapping = this._originalMappings[++index];\n }\n }\n }\n\n return mappings;\n };\n\nexports.SourceMapConsumer = SourceMapConsumer;\n\n/**\n * A BasicSourceMapConsumer instance represents a parsed source map which we can\n * query for information about the original file positions by giving it a file\n * position in the generated source.\n *\n * The first parameter is the raw source map (either as a JSON string, or\n * already parsed to an object). According to the spec, source maps have the\n * following attributes:\n *\n * - version: Which version of the source map spec this map is following.\n * - sources: An array of URLs to the original source files.\n * - names: An array of identifiers which can be referrenced by individual mappings.\n * - sourceRoot: Optional. The URL root from which all sources are relative.\n * - sourcesContent: Optional. An array of contents of the original source files.\n * - mappings: A string of base64 VLQs which contain the actual mappings.\n * - file: Optional. The generated file this source map is associated with.\n *\n * Here is an example source map, taken from the source map spec[0]:\n *\n * {\n * version : 3,\n * file: \"out.js\",\n * sourceRoot : \"\",\n * sources: [\"foo.js\", \"bar.js\"],\n * names: [\"src\", \"maps\", \"are\", \"fun\"],\n * mappings: \"AA,AB;;ABCDE;\"\n * }\n *\n * The second parameter, if given, is a string whose value is the URL\n * at which the source map was found. 
This URL is used to compute the\n * sources array.\n *\n * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1#\n */\nfunction BasicSourceMapConsumer(aSourceMap, aSourceMapURL) {\n var sourceMap = aSourceMap;\n if (typeof aSourceMap === 'string') {\n sourceMap = util.parseSourceMapInput(aSourceMap);\n }\n\n var version = util.getArg(sourceMap, 'version');\n var sources = util.getArg(sourceMap, 'sources');\n // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which\n // requires the array) to play nice here.\n var names = util.getArg(sourceMap, 'names', []);\n var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null);\n var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null);\n var mappings = util.getArg(sourceMap, 'mappings');\n var file = util.getArg(sourceMap, 'file', null);\n\n // Once again, Sass deviates from the spec and supplies the version as a\n // string rather than a number, so we use loose equality checking here.\n if (version != this._version) {\n throw new Error('Unsupported version: ' + version);\n }\n\n if (sourceRoot) {\n sourceRoot = util.normalize(sourceRoot);\n }\n\n sources = sources\n .map(String)\n // Some source maps produce relative source paths like \"./foo.js\" instead of\n // \"foo.js\". Normalize these first so that future comparisons will succeed.\n // See bugzil.la/1090768.\n .map(util.normalize)\n // Always ensure that absolute sources are internally stored relative to\n // the source root, if the source root is absolute. Not doing this would\n // be particularly problematic when the source root is a prefix of the\n // source (valid, but why??). See github issue #199 and bugzil.la/1188982.\n .map(function (source) {\n return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source)\n ? util.relative(sourceRoot, source)\n : source;\n });\n\n // Pass `true` below to allow duplicate names and sources. While source maps\n // are intended to be compressed and deduplicated, the TypeScript compiler\n // sometimes generates source maps with duplicates in them. See Github issue\n // #72 and bugzil.la/889492.\n this._names = ArraySet.fromArray(names.map(String), true);\n this._sources = ArraySet.fromArray(sources, true);\n\n this._absoluteSources = this._sources.toArray().map(function (s) {\n return util.computeSourceURL(sourceRoot, s, aSourceMapURL);\n });\n\n this.sourceRoot = sourceRoot;\n this.sourcesContent = sourcesContent;\n this._mappings = mappings;\n this._sourceMapURL = aSourceMapURL;\n this.file = file;\n}\n\nBasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);\nBasicSourceMapConsumer.prototype.consumer = SourceMapConsumer;\n\n/**\n * Utility function to find the index of a source. Returns -1 if not\n * found.\n */\nBasicSourceMapConsumer.prototype._findSourceIndex = function(aSource) {\n var relativeSource = aSource;\n if (this.sourceRoot != null) {\n relativeSource = util.relative(this.sourceRoot, relativeSource);\n }\n\n if (this._sources.has(relativeSource)) {\n return this._sources.indexOf(relativeSource);\n }\n\n // Maybe aSource is an absolute URL as returned by |sources|. 
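// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): the `sources`
// getter reports URLs resolved against sourceRoot, while queries accept either
// the resolved or the map-relative spelling. All values are invented.
var SourceMapConsumer = require('source-map').SourceMapConsumer;

var consumer = new SourceMapConsumer({
  version: 3,
  file: 'min.js',
  sourceRoot: 'http://example.com/app/',
  sources: ['./one.js'],   // normalized to 'one.js', then resolved against the root
  names: [],
  mappings: 'AAAA'
});

console.log(consumer.sources);
// [ 'http://example.com/app/one.js' ]

// The absolute spelling is mapped back to the same internal source index.
consumer.generatedPositionFor({
  source: 'http://example.com/app/one.js', line: 1, column: 0
});
// Roughly: { line: 1, column: 0, lastColumn: null }
// ---------------------------------------------------------------------------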
In\n // this case we can't simply undo the transform.\n var i;\n for (i = 0; i < this._absoluteSources.length; ++i) {\n if (this._absoluteSources[i] == aSource) {\n return i;\n }\n }\n\n return -1;\n};\n\n/**\n * Create a BasicSourceMapConsumer from a SourceMapGenerator.\n *\n * @param SourceMapGenerator aSourceMap\n * The source map that will be consumed.\n * @param String aSourceMapURL\n * The URL at which the source map can be found (optional)\n * @returns BasicSourceMapConsumer\n */\nBasicSourceMapConsumer.fromSourceMap =\n function SourceMapConsumer_fromSourceMap(aSourceMap, aSourceMapURL) {\n var smc = Object.create(BasicSourceMapConsumer.prototype);\n\n var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true);\n var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true);\n smc.sourceRoot = aSourceMap._sourceRoot;\n smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(),\n smc.sourceRoot);\n smc.file = aSourceMap._file;\n smc._sourceMapURL = aSourceMapURL;\n smc._absoluteSources = smc._sources.toArray().map(function (s) {\n return util.computeSourceURL(smc.sourceRoot, s, aSourceMapURL);\n });\n\n // Because we are modifying the entries (by converting string sources and\n // names to indices into the sources and names ArraySets), we have to make\n // a copy of the entry or else bad things happen. Shared mutable state\n // strikes again! See github issue #191.\n\n var generatedMappings = aSourceMap._mappings.toArray().slice();\n var destGeneratedMappings = smc.__generatedMappings = [];\n var destOriginalMappings = smc.__originalMappings = [];\n\n for (var i = 0, length = generatedMappings.length; i < length; i++) {\n var srcMapping = generatedMappings[i];\n var destMapping = new Mapping;\n destMapping.generatedLine = srcMapping.generatedLine;\n destMapping.generatedColumn = srcMapping.generatedColumn;\n\n if (srcMapping.source) {\n destMapping.source = sources.indexOf(srcMapping.source);\n destMapping.originalLine = srcMapping.originalLine;\n destMapping.originalColumn = srcMapping.originalColumn;\n\n if (srcMapping.name) {\n destMapping.name = names.indexOf(srcMapping.name);\n }\n\n destOriginalMappings.push(destMapping);\n }\n\n destGeneratedMappings.push(destMapping);\n }\n\n quickSort(smc.__originalMappings, util.compareByOriginalPositions);\n\n return smc;\n };\n\n/**\n * The version of the source mapping spec that we are consuming.\n */\nBasicSourceMapConsumer.prototype._version = 3;\n\n/**\n * The list of original sources.\n */\nObject.defineProperty(BasicSourceMapConsumer.prototype, 'sources', {\n get: function () {\n return this._absoluteSources.slice();\n }\n});\n\n/**\n * Provide the JIT with a nice shape / hidden class.\n */\nfunction Mapping() {\n this.generatedLine = 0;\n this.generatedColumn = 0;\n this.source = null;\n this.originalLine = null;\n this.originalColumn = null;\n this.name = null;\n}\n\n/**\n * Parse the mappings in a string in to a data structure which we can easily\n * query (the ordered arrays in the `this.__generatedMappings` and\n * `this.__originalMappings` properties).\n */\nBasicSourceMapConsumer.prototype._parseMappings =\n function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {\n var generatedLine = 1;\n var previousGeneratedColumn = 0;\n var previousOriginalLine = 0;\n var previousOriginalColumn = 0;\n var previousSource = 0;\n var previousName = 0;\n var length = aStr.length;\n var index = 0;\n var cachedSegments = {};\n var temp = {};\n var originalMappings = [];\n 
var generatedMappings = [];\n var mapping, str, segment, end, value;\n\n while (index < length) {\n if (aStr.charAt(index) === ';') {\n generatedLine++;\n index++;\n previousGeneratedColumn = 0;\n }\n else if (aStr.charAt(index) === ',') {\n index++;\n }\n else {\n mapping = new Mapping();\n mapping.generatedLine = generatedLine;\n\n // Because each offset is encoded relative to the previous one,\n // many segments often have the same encoding. We can exploit this\n // fact by caching the parsed variable length fields of each segment,\n // allowing us to avoid a second parse if we encounter the same\n // segment again.\n for (end = index; end < length; end++) {\n if (this._charIsMappingSeparator(aStr, end)) {\n break;\n }\n }\n str = aStr.slice(index, end);\n\n segment = cachedSegments[str];\n if (segment) {\n index += str.length;\n } else {\n segment = [];\n while (index < end) {\n base64VLQ.decode(aStr, index, temp);\n value = temp.value;\n index = temp.rest;\n segment.push(value);\n }\n\n if (segment.length === 2) {\n throw new Error('Found a source, but no line and column');\n }\n\n if (segment.length === 3) {\n throw new Error('Found a source and line, but no column');\n }\n\n cachedSegments[str] = segment;\n }\n\n // Generated column.\n mapping.generatedColumn = previousGeneratedColumn + segment[0];\n previousGeneratedColumn = mapping.generatedColumn;\n\n if (segment.length > 1) {\n // Original source.\n mapping.source = previousSource + segment[1];\n previousSource += segment[1];\n\n // Original line.\n mapping.originalLine = previousOriginalLine + segment[2];\n previousOriginalLine = mapping.originalLine;\n // Lines are stored 0-based\n mapping.originalLine += 1;\n\n // Original column.\n mapping.originalColumn = previousOriginalColumn + segment[3];\n previousOriginalColumn = mapping.originalColumn;\n\n if (segment.length > 4) {\n // Original name.\n mapping.name = previousName + segment[4];\n previousName += segment[4];\n }\n }\n\n generatedMappings.push(mapping);\n if (typeof mapping.originalLine === 'number') {\n originalMappings.push(mapping);\n }\n }\n }\n\n quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated);\n this.__generatedMappings = generatedMappings;\n\n quickSort(originalMappings, util.compareByOriginalPositions);\n this.__originalMappings = originalMappings;\n };\n\n/**\n * Find the mapping that best matches the hypothetical \"needle\" mapping that\n * we are searching for in the given \"haystack\" of mappings.\n */\nBasicSourceMapConsumer.prototype._findMapping =\n function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName,\n aColumnName, aComparator, aBias) {\n // To return the position we are searching for, we must first find the\n // mapping for the given position and then return the opposite position it\n // points to. Because the mappings are sorted, we can use binary search to\n // find the best mapping.\n\n if (aNeedle[aLineName] <= 0) {\n throw new TypeError('Line must be greater than or equal to 1, got '\n + aNeedle[aLineName]);\n }\n if (aNeedle[aColumnName] < 0) {\n throw new TypeError('Column must be greater than or equal to 0, got '\n + aNeedle[aColumnName]);\n }\n\n return binarySearch.search(aNeedle, aMappings, aComparator, aBias);\n };\n\n/**\n * Compute the last column for each generated mapping. 
The last column is\n * inclusive.\n */\nBasicSourceMapConsumer.prototype.computeColumnSpans =\n  function SourceMapConsumer_computeColumnSpans() {\n    for (var index = 0; index < this._generatedMappings.length; ++index) {\n      var mapping = this._generatedMappings[index];\n\n      // Mappings do not contain a field for the last generated column. We\n      // can come up with an optimistic estimate, however, by assuming that\n      // mappings are contiguous (i.e. given two consecutive mappings, the\n      // first mapping ends where the second one starts).\n      if (index + 1 < this._generatedMappings.length) {\n        var nextMapping = this._generatedMappings[index + 1];\n\n        if (mapping.generatedLine === nextMapping.generatedLine) {\n          mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1;\n          continue;\n        }\n      }\n\n      // The last mapping for each line spans the entire line.\n      mapping.lastGeneratedColumn = Infinity;\n    }\n  };\n\n/**\n * Returns the original source, line, and column information for the generated\n * source's line and column positions provided. The only argument is an object\n * with the following properties:\n *\n *   - line: The line number in the generated source. The line number\n *     is 1-based.\n *   - column: The column number in the generated source. The column\n *     number is 0-based.\n *   - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or\n *     'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the\n *     closest element that is smaller than or greater than the one we are\n *     searching for, respectively, if the exact element cannot be found.\n *     Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'.\n *\n * and an object is returned with the following properties:\n *\n *   - source: The original source file, or null.\n *   - line: The line number in the original source, or null. The\n *     line number is 1-based.\n *   - column: The column number in the original source, or null. 
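// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): one original line
// that expanded into two generated chunks, queried with allGeneratedPositionsFor.
// 'AAAA,EAAE' maps generated 1:0 and 1:2 to one.js 1:0 and 1:2; all other
// values are invented for the example.
var SourceMapConsumer = require('source-map').SourceMapConsumer;

var consumer = new SourceMapConsumer({
  version: 3,
  file: 'min.js',
  sources: ['one.js'],
  names: [],
  mappings: 'AAAA,EAAE'
});

// Without this call, lastColumn stays null in the results below.
consumer.computeColumnSpans();

// Omitting `column` asks for every generated range produced from that line.
console.log(consumer.allGeneratedPositionsFor({ source: 'one.js', line: 1 }));
// Roughly: [ { line: 1, column: 0, lastColumn: 1 },
//            { line: 1, column: 2, lastColumn: Infinity } ]
// ---------------------------------------------------------------------------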
The\n * column number is 0-based.\n * - name: The original identifier, or null.\n */\nBasicSourceMapConsumer.prototype.originalPositionFor =\n function SourceMapConsumer_originalPositionFor(aArgs) {\n var needle = {\n generatedLine: util.getArg(aArgs, 'line'),\n generatedColumn: util.getArg(aArgs, 'column')\n };\n\n var index = this._findMapping(\n needle,\n this._generatedMappings,\n \"generatedLine\",\n \"generatedColumn\",\n util.compareByGeneratedPositionsDeflated,\n util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)\n );\n\n if (index >= 0) {\n var mapping = this._generatedMappings[index];\n\n if (mapping.generatedLine === needle.generatedLine) {\n var source = util.getArg(mapping, 'source', null);\n if (source !== null) {\n source = this._sources.at(source);\n source = util.computeSourceURL(this.sourceRoot, source, this._sourceMapURL);\n }\n var name = util.getArg(mapping, 'name', null);\n if (name !== null) {\n name = this._names.at(name);\n }\n return {\n source: source,\n line: util.getArg(mapping, 'originalLine', null),\n column: util.getArg(mapping, 'originalColumn', null),\n name: name\n };\n }\n }\n\n return {\n source: null,\n line: null,\n column: null,\n name: null\n };\n };\n\n/**\n * Return true if we have the source content for every source in the source\n * map, false otherwise.\n */\nBasicSourceMapConsumer.prototype.hasContentsOfAllSources =\n function BasicSourceMapConsumer_hasContentsOfAllSources() {\n if (!this.sourcesContent) {\n return false;\n }\n return this.sourcesContent.length >= this._sources.size() &&\n !this.sourcesContent.some(function (sc) { return sc == null; });\n };\n\n/**\n * Returns the original source content. The only argument is the url of the\n * original source file. Returns null if no original source content is\n * available.\n */\nBasicSourceMapConsumer.prototype.sourceContentFor =\n function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {\n if (!this.sourcesContent) {\n return null;\n }\n\n var index = this._findSourceIndex(aSource);\n if (index >= 0) {\n return this.sourcesContent[index];\n }\n\n var relativeSource = aSource;\n if (this.sourceRoot != null) {\n relativeSource = util.relative(this.sourceRoot, relativeSource);\n }\n\n var url;\n if (this.sourceRoot != null\n && (url = util.urlParse(this.sourceRoot))) {\n // XXX: file:// URIs and absolute paths lead to unexpected behavior for\n // many users. We can help them out when they expect file:// URIs to\n // behave like it would if they were running a local HTTP server. See\n // https://bugzilla.mozilla.org/show_bug.cgi?id=885597.\n var fileUriAbsPath = relativeSource.replace(/^file:\\/\\//, \"\");\n if (url.scheme == \"file\"\n && this._sources.has(fileUriAbsPath)) {\n return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)]\n }\n\n if ((!url.path || url.path == \"/\")\n && this._sources.has(\"/\" + relativeSource)) {\n return this.sourcesContent[this._sources.indexOf(\"/\" + relativeSource)];\n }\n }\n\n // This function is used recursively from\n // IndexedSourceMapConsumer.prototype.sourceContentFor. In that case, we\n // don't want to throw if we can't find the source - we just want to\n // return null, so we provide a flag to exit gracefully.\n if (nullOnMissing) {\n return null;\n }\n else {\n throw new Error('\"' + relativeSource + '\" is not in the SourceMap.');\n }\n };\n\n/**\n * Returns the generated line and column information for the original source,\n * line, and column positions provided. 
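// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): sourcesContent
// lookups. The embedded content and file names are invented.
var SourceMapConsumer = require('source-map').SourceMapConsumer;

var consumer = new SourceMapConsumer({
  version: 3,
  file: 'min.js',
  sources: ['one.js', 'two.js'],
  sourcesContent: ['ONE', null],   // only the first source ships its content
  names: [],
  mappings: 'AAAA'
});

consumer.hasContentsOfAllSources();            // false, 'two.js' has a null entry
consumer.sourceContentFor('one.js');           // 'ONE'
consumer.sourceContentFor('missing.js', true); // null instead of throwing for unknown files
// ---------------------------------------------------------------------------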
The only argument is an object with\n * the following properties:\n *\n * - source: The filename of the original source.\n * - line: The line number in the original source. The line number\n * is 1-based.\n * - column: The column number in the original source. The column\n * number is 0-based.\n * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or\n * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the\n * closest element that is smaller than or greater than the one we are\n * searching for, respectively, if the exact element cannot be found.\n * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'.\n *\n * and an object is returned with the following properties:\n *\n * - line: The line number in the generated source, or null. The\n * line number is 1-based.\n * - column: The column number in the generated source, or null.\n * The column number is 0-based.\n */\nBasicSourceMapConsumer.prototype.generatedPositionFor =\n function SourceMapConsumer_generatedPositionFor(aArgs) {\n var source = util.getArg(aArgs, 'source');\n source = this._findSourceIndex(source);\n if (source < 0) {\n return {\n line: null,\n column: null,\n lastColumn: null\n };\n }\n\n var needle = {\n source: source,\n originalLine: util.getArg(aArgs, 'line'),\n originalColumn: util.getArg(aArgs, 'column')\n };\n\n var index = this._findMapping(\n needle,\n this._originalMappings,\n \"originalLine\",\n \"originalColumn\",\n util.compareByOriginalPositions,\n util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)\n );\n\n if (index >= 0) {\n var mapping = this._originalMappings[index];\n\n if (mapping.source === needle.source) {\n return {\n line: util.getArg(mapping, 'generatedLine', null),\n column: util.getArg(mapping, 'generatedColumn', null),\n lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)\n };\n }\n }\n\n return {\n line: null,\n column: null,\n lastColumn: null\n };\n };\n\nexports.BasicSourceMapConsumer = BasicSourceMapConsumer;\n\n/**\n * An IndexedSourceMapConsumer instance represents a parsed source map which\n * we can query for information. It differs from BasicSourceMapConsumer in\n * that it takes \"indexed\" source maps (i.e. ones with a \"sections\" field) as\n * input.\n *\n * The first parameter is a raw source map (either as a JSON string, or already\n * parsed to an object). According to the spec for indexed source maps, they\n * have the following attributes:\n *\n * - version: Which version of the source map spec this map is following.\n * - file: Optional. The generated file this source map is associated with.\n * - sections: A list of section definitions.\n *\n * Each value under the \"sections\" field has two fields:\n * - offset: The offset into the original specified at which this section\n * begins to apply, defined as an object with a \"line\" and \"column\"\n * field.\n * - map: A source map definition. 
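// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original library source): how `bias`
// changes generatedPositionFor when the exact original column has no mapping
// of its own. The tiny map is hand-written; 'AAAA,EAAE' maps original 1:0 and
// 1:2 of one.js to generated 1:0 and 1:2.
var SourceMapConsumer = require('source-map').SourceMapConsumer;

var consumer = new SourceMapConsumer({
  version: 3,
  sources: ['one.js'],
  names: [],
  mappings: 'AAAA,EAAE'
});

// Original column 1 is not mapped directly; the bias picks which neighbour answers.
consumer.generatedPositionFor({ source: 'one.js', line: 1, column: 1 });
// default GREATEST_LOWER_BOUND, roughly: { line: 1, column: 0, lastColumn: null }

consumer.generatedPositionFor({
  source: 'one.js', line: 1, column: 1,
  bias: SourceMapConsumer.LEAST_UPPER_BOUND
});
// roughly: { line: 1, column: 2, lastColumn: null }
// ---------------------------------------------------------------------------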
This source map could also be indexed,\n * but doesn't have to be.\n *\n * Instead of the \"map\" field, it's also possible to have a \"url\" field\n * specifying a URL to retrieve a source map from, but that's currently\n * unsupported.\n *\n * Here's an example source map, taken from the source map spec[0], but\n * modified to omit a section which uses the \"url\" field.\n *\n * {\n * version : 3,\n * file: \"app.js\",\n * sections: [{\n * offset: {line:100, column:10},\n * map: {\n * version : 3,\n * file: \"section.js\",\n * sources: [\"foo.js\", \"bar.js\"],\n * names: [\"src\", \"maps\", \"are\", \"fun\"],\n * mappings: \"AAAA,E;;ABCDE;\"\n * }\n * }],\n * }\n *\n * The second parameter, if given, is a string whose value is the URL\n * at which the source map was found. This URL is used to compute the\n * sources array.\n *\n * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt\n */\nfunction IndexedSourceMapConsumer(aSourceMap, aSourceMapURL) {\n var sourceMap = aSourceMap;\n if (typeof aSourceMap === 'string') {\n sourceMap = util.parseSourceMapInput(aSourceMap);\n }\n\n var version = util.getArg(sourceMap, 'version');\n var sections = util.getArg(sourceMap, 'sections');\n\n if (version != this._version) {\n throw new Error('Unsupported version: ' + version);\n }\n\n this._sources = new ArraySet();\n this._names = new ArraySet();\n\n var lastOffset = {\n line: -1,\n column: 0\n };\n this._sections = sections.map(function (s) {\n if (s.url) {\n // The url field will require support for asynchronicity.\n // See https://github.com/mozilla/source-map/issues/16\n throw new Error('Support for url field in sections not implemented.');\n }\n var offset = util.getArg(s, 'offset');\n var offsetLine = util.getArg(offset, 'line');\n var offsetColumn = util.getArg(offset, 'column');\n\n if (offsetLine < lastOffset.line ||\n (offsetLine === lastOffset.line && offsetColumn < lastOffset.column)) {\n throw new Error('Section offsets must be ordered and non-overlapping.');\n }\n lastOffset = offset;\n\n return {\n generatedOffset: {\n // The offset fields are 0-based, but we use 1-based indices when\n // encoding/decoding from VLQ.\n generatedLine: offsetLine + 1,\n generatedColumn: offsetColumn + 1\n },\n consumer: new SourceMapConsumer(util.getArg(s, 'map'), aSourceMapURL)\n }\n });\n}\n\nIndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);\nIndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer;\n\n/**\n * The version of the source mapping spec that we are consuming.\n */\nIndexedSourceMapConsumer.prototype._version = 3;\n\n/**\n * The list of original sources.\n */\nObject.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', {\n get: function () {\n var sources = [];\n for (var i = 0; i < this._sections.length; i++) {\n for (var j = 0; j < this._sections[i].consumer.sources.length; j++) {\n sources.push(this._sections[i].consumer.sources[j]);\n }\n }\n return sources;\n }\n});\n\n/**\n * Returns the original source, line, and column information for the generated\n * source's line and column positions provided. The only argument is an object\n * with the following properties:\n *\n * - line: The line number in the generated source. The line number\n * is 1-based.\n * - column: The column number in the generated source. 
The column\n * number is 0-based.\n *\n * and an object is returned with the following properties:\n *\n * - source: The original source file, or null.\n * - line: The line number in the original source, or null. The\n * line number is 1-based.\n * - column: The column number in the original source, or null. The\n * column number is 0-based.\n * - name: The original identifier, or null.\n */\nIndexedSourceMapConsumer.prototype.originalPositionFor =\n function IndexedSourceMapConsumer_originalPositionFor(aArgs) {\n var needle = {\n generatedLine: util.getArg(aArgs, 'line'),\n generatedColumn: util.getArg(aArgs, 'column')\n };\n\n // Find the section containing the generated position we're trying to map\n // to an original position.\n var sectionIndex = binarySearch.search(needle, this._sections,\n function(needle, section) {\n var cmp = needle.generatedLine - section.generatedOffset.generatedLine;\n if (cmp) {\n return cmp;\n }\n\n return (needle.generatedColumn -\n section.generatedOffset.generatedColumn);\n });\n var section = this._sections[sectionIndex];\n\n if (!section) {\n return {\n source: null,\n line: null,\n column: null,\n name: null\n };\n }\n\n return section.consumer.originalPositionFor({\n line: needle.generatedLine -\n (section.generatedOffset.generatedLine - 1),\n column: needle.generatedColumn -\n (section.generatedOffset.generatedLine === needle.generatedLine\n ? section.generatedOffset.generatedColumn - 1\n : 0),\n bias: aArgs.bias\n });\n };\n\n/**\n * Return true if we have the source content for every source in the source\n * map, false otherwise.\n */\nIndexedSourceMapConsumer.prototype.hasContentsOfAllSources =\n function IndexedSourceMapConsumer_hasContentsOfAllSources() {\n return this._sections.every(function (s) {\n return s.consumer.hasContentsOfAllSources();\n });\n };\n\n/**\n * Returns the original source content. The only argument is the url of the\n * original source file. Returns null if no original source content is\n * available.\n */\nIndexedSourceMapConsumer.prototype.sourceContentFor =\n function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {\n for (var i = 0; i < this._sections.length; i++) {\n var section = this._sections[i];\n\n var content = section.consumer.sourceContentFor(aSource, true);\n if (content) {\n return content;\n }\n }\n if (nullOnMissing) {\n return null;\n }\n else {\n throw new Error('\"' + aSource + '\" is not in the SourceMap.');\n }\n };\n\n/**\n * Returns the generated line and column information for the original source,\n * line, and column positions provided. The only argument is an object with\n * the following properties:\n *\n * - source: The filename of the original source.\n * - line: The line number in the original source. The line number\n * is 1-based.\n * - column: The column number in the original source. The column\n * number is 0-based.\n *\n * and an object is returned with the following properties:\n *\n * - line: The line number in the generated source, or null. The\n * line number is 1-based. 
\n * - column: The column number in the generated source, or null.\n * The column number is 0-based.\n */\nIndexedSourceMapConsumer.prototype.generatedPositionFor =\n function IndexedSourceMapConsumer_generatedPositionFor(aArgs) {\n for (var i = 0; i < this._sections.length; i++) {\n var section = this._sections[i];\n\n // Only consider this section if the requested source is in the list of\n // sources of the consumer.\n if (section.consumer._findSourceIndex(util.getArg(aArgs, 'source')) === -1) {\n continue;\n }\n var generatedPosition = section.consumer.generatedPositionFor(aArgs);\n if (generatedPosition) {\n var ret = {\n line: generatedPosition.line +\n (section.generatedOffset.generatedLine - 1),\n column: generatedPosition.column +\n (section.generatedOffset.generatedLine === generatedPosition.line\n ? section.generatedOffset.generatedColumn - 1\n : 0)\n };\n return ret;\n }\n }\n\n return {\n line: null,\n column: null\n };\n };\n\n/**\n * Parse the mappings in a string in to a data structure which we can easily\n * query (the ordered arrays in the `this.__generatedMappings` and\n * `this.__originalMappings` properties).\n */\nIndexedSourceMapConsumer.prototype._parseMappings =\n function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) {\n this.__generatedMappings = [];\n this.__originalMappings = [];\n for (var i = 0; i < this._sections.length; i++) {\n var section = this._sections[i];\n var sectionMappings = section.consumer._generatedMappings;\n for (var j = 0; j < sectionMappings.length; j++) {\n var mapping = sectionMappings[j];\n\n var source = section.consumer._sources.at(mapping.source);\n source = util.computeSourceURL(section.consumer.sourceRoot, source, this._sourceMapURL);\n this._sources.add(source);\n source = this._sources.indexOf(source);\n\n var name = null;\n if (mapping.name) {\n name = section.consumer._names.at(mapping.name);\n this._names.add(name);\n name = this._names.indexOf(name);\n }\n\n // The mappings coming from the consumer for the section have\n // generated positions relative to the start of the section, so we\n // need to offset them to be relative to the start of the concatenated\n // generated file.\n var adjustedMapping = {\n source: source,\n generatedLine: mapping.generatedLine +\n (section.generatedOffset.generatedLine - 1),\n generatedColumn: mapping.generatedColumn +\n (section.generatedOffset.generatedLine === mapping.generatedLine\n ? section.generatedOffset.generatedColumn - 1\n : 0),\n originalLine: mapping.originalLine,\n originalColumn: mapping.originalColumn,\n name: name\n };\n\n this.__generatedMappings.push(adjustedMapping);\n if (typeof adjustedMapping.originalLine === 'number') {\n this.__originalMappings.push(adjustedMapping);\n }\n }\n }\n\n quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated);\n quickSort(this.__originalMappings, util.compareByOriginalPositions);\n };\n\nexports.IndexedSourceMapConsumer = IndexedSourceMapConsumer;\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/source-map-consumer.js\n// module id = 7\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. 
See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\nexports.GREATEST_LOWER_BOUND = 1;\nexports.LEAST_UPPER_BOUND = 2;\n\n/**\n * Recursive implementation of binary search.\n *\n * @param aLow Indices here and lower do not contain the needle.\n * @param aHigh Indices here and higher do not contain the needle.\n * @param aNeedle The element being searched for.\n * @param aHaystack The non-empty array being searched.\n * @param aCompare Function which takes two elements and returns -1, 0, or 1.\n * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or\n * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the\n * closest element that is smaller than or greater than the one we are\n * searching for, respectively, if the exact element cannot be found.\n */\nfunction recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) {\n // This function terminates when one of the following is true:\n //\n // 1. We find the exact element we are looking for.\n //\n // 2. We did not find the exact element, but we can return the index of\n // the next-closest element.\n //\n // 3. We did not find the exact element, and there is no next-closest\n // element than the one we are searching for, so we return -1.\n var mid = Math.floor((aHigh - aLow) / 2) + aLow;\n var cmp = aCompare(aNeedle, aHaystack[mid], true);\n if (cmp === 0) {\n // Found the element we are looking for.\n return mid;\n }\n else if (cmp > 0) {\n // Our needle is greater than aHaystack[mid].\n if (aHigh - mid > 1) {\n // The element is in the upper half.\n return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias);\n }\n\n // The exact needle element was not found in this haystack. Determine if\n // we are in termination case (3) or (2) and return the appropriate thing.\n if (aBias == exports.LEAST_UPPER_BOUND) {\n return aHigh < aHaystack.length ? aHigh : -1;\n } else {\n return mid;\n }\n }\n else {\n // Our needle is less than aHaystack[mid].\n if (mid - aLow > 1) {\n // The element is in the lower half.\n return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias);\n }\n\n // we are in termination case (3) or (2) and return the appropriate thing.\n if (aBias == exports.LEAST_UPPER_BOUND) {\n return mid;\n } else {\n return aLow < 0 ? -1 : aLow;\n }\n }\n}\n\n/**\n * This is an implementation of binary search which will always try and return\n * the index of the closest element if there is no exact hit. This is because\n * mappings between original and generated line/col pairs are single points,\n * and there is an implicit region between each of them, so a miss just means\n * that you aren't on the very start of a region.\n *\n * @param aNeedle The element you are looking for.\n * @param aHaystack The array that is being searched.\n * @param aCompare A function which takes the needle and an element in the\n * array and returns -1, 0, or 1 depending on whether the needle is less\n * than, equal to, or greater than the element, respectively.\n * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or\n * 'binarySearch.LEAST_UPPER_BOUND'. 
Specifies whether to return the\n * closest element that is smaller than or greater than the one we are\n * searching for, respectively, if the exact element cannot be found.\n * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'.\n */\nexports.search = function search(aNeedle, aHaystack, aCompare, aBias) {\n if (aHaystack.length === 0) {\n return -1;\n }\n\n var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack,\n aCompare, aBias || exports.GREATEST_LOWER_BOUND);\n if (index < 0) {\n return -1;\n }\n\n // We have found either the exact element, or the next-closest element than\n // the one we are searching for. However, there may be more than one such\n // element. Make sure we always return the smallest of these.\n while (index - 1 >= 0) {\n if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) {\n break;\n }\n --index;\n }\n\n return index;\n};\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/binary-search.js\n// module id = 8\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\n// It turns out that some (most?) JavaScript engines don't self-host\n// `Array.prototype.sort`. This makes sense because C++ will likely remain\n// faster than JS when doing raw CPU-intensive sorting. However, when using a\n// custom comparator function, calling back and forth between the VM's C++ and\n// JIT'd JS is rather slow *and* loses JIT type information, resulting in\n// worse generated code for the comparator function than would be optimal. In\n// fact, when sorting with a comparator, these costs outweigh the benefits of\n// sorting in C++. By using our own JS-implemented Quick Sort (below), we get\n// a ~3500ms mean speed-up in `bench/bench.html`.\n\n/**\n * Swap the elements indexed by `x` and `y` in the array `ary`.\n *\n * @param {Array} ary\n * The array.\n * @param {Number} x\n * The index of the first item.\n * @param {Number} y\n * The index of the second item.\n */\nfunction swap(ary, x, y) {\n var temp = ary[x];\n ary[x] = ary[y];\n ary[y] = temp;\n}\n\n/**\n * Returns a random integer within the range `low .. high` inclusive.\n *\n * @param {Number} low\n * The lower bound on the range.\n * @param {Number} high\n * The upper bound on the range.\n */\nfunction randomIntInRange(low, high) {\n return Math.round(low + (Math.random() * (high - low)));\n}\n\n/**\n * The Quick Sort algorithm.\n *\n * @param {Array} ary\n * An array to sort.\n * @param {function} comparator\n * Function to use to compare two items.\n * @param {Number} p\n * Start index of the array\n * @param {Number} r\n * End index of the array\n */\nfunction doQuickSort(ary, comparator, p, r) {\n // If our lower bound is less than our upper bound, we (1) partition the\n // array into two pieces and (2) recurse on each half. If it is not, this is\n // the empty array and our base case.\n\n if (p < r) {\n // (1) Partitioning.\n //\n // The partitioning chooses a pivot between `p` and `r` and moves all\n // elements that are less than or equal to the pivot to the before it, and\n // all the elements that are greater than it after it. The effect is that\n // once partition is done, the pivot is in the exact place it will be when\n // the array is put in sorted order, and it will not need to be moved\n // again. 
This runs in O(n) time.\n\n // Always choose a random pivot so that an input array which is reverse\n // sorted does not cause O(n^2) running time.\n var pivotIndex = randomIntInRange(p, r);\n var i = p - 1;\n\n swap(ary, pivotIndex, r);\n var pivot = ary[r];\n\n // Immediately after `j` is incremented in this loop, the following hold\n // true:\n //\n // * Every element in `ary[p .. i]` is less than or equal to the pivot.\n //\n // * Every element in `ary[i+1 .. j-1]` is greater than the pivot.\n for (var j = p; j < r; j++) {\n if (comparator(ary[j], pivot) <= 0) {\n i += 1;\n swap(ary, i, j);\n }\n }\n\n swap(ary, i + 1, j);\n var q = i + 1;\n\n // (2) Recurse on each half.\n\n doQuickSort(ary, comparator, p, q - 1);\n doQuickSort(ary, comparator, q + 1, r);\n }\n}\n\n/**\n * Sort the given array in-place with the given comparator function.\n *\n * @param {Array} ary\n * An array to sort.\n * @param {function} comparator\n * Function to use to compare two items.\n */\nexports.quickSort = function (ary, comparator) {\n doQuickSort(ary, comparator, 0, ary.length - 1);\n};\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/quick-sort.js\n// module id = 9\n// module chunks = 0","/* -*- Mode: js; js-indent-level: 2; -*- */\n/*\n * Copyright 2011 Mozilla Foundation and contributors\n * Licensed under the New BSD license. See LICENSE or:\n * http://opensource.org/licenses/BSD-3-Clause\n */\n\nvar SourceMapGenerator = require('./source-map-generator').SourceMapGenerator;\nvar util = require('./util');\n\n// Matches a Windows-style `\\r\\n` newline or a `\\n` newline used by all other\n// operating systems these days (capturing the result).\nvar REGEX_NEWLINE = /(\\r?\\n)/;\n\n// Newline character code for charCodeAt() comparisons\nvar NEWLINE_CODE = 10;\n\n// Private symbol for identifying `SourceNode`s when multiple versions of\n// the source-map library are loaded. This MUST NOT CHANGE across\n// versions!\nvar isSourceNode = \"$$$isSourceNode$$$\";\n\n/**\n * SourceNodes provide a way to abstract over interpolating/concatenating\n * snippets of generated JavaScript source code while maintaining the line and\n * column information associated with the original source code.\n *\n * @param aLine The original line number.\n * @param aColumn The original column number.\n * @param aSource The original source's filename.\n * @param aChunks Optional. An array of strings which are snippets of\n * generated JS, or other SourceNodes.\n * @param aName The original identifier.\n */\nfunction SourceNode(aLine, aColumn, aSource, aChunks, aName) {\n this.children = [];\n this.sourceContents = {};\n this.line = aLine == null ? null : aLine;\n this.column = aColumn == null ? null : aColumn;\n this.source = aSource == null ? null : aSource;\n this.name = aName == null ? null : aName;\n this[isSourceNode] = true;\n if (aChunks != null) this.add(aChunks);\n}\n\n/**\n * Creates a SourceNode from generated code and a SourceMapConsumer.\n *\n * @param aGeneratedCode The generated code\n * @param aSourceMapConsumer The SourceMap for the generated code\n * @param aRelativePath Optional. 
The path that relative sources in the\n * SourceMapConsumer should be relative to.\n */\nSourceNode.fromStringWithSourceMap =\n function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) {\n // The SourceNode we want to fill with the generated code\n // and the SourceMap\n var node = new SourceNode();\n\n // All even indices of this array are one line of the generated code,\n // while all odd indices are the newlines between two adjacent lines\n // (since `REGEX_NEWLINE` captures its match).\n // Processed fragments are accessed by calling `shiftNextLine`.\n var remainingLines = aGeneratedCode.split(REGEX_NEWLINE);\n var remainingLinesIndex = 0;\n var shiftNextLine = function() {\n var lineContents = getNextLine();\n // The last line of a file might not have a newline.\n var newLine = getNextLine() || \"\";\n return lineContents + newLine;\n\n function getNextLine() {\n return remainingLinesIndex < remainingLines.length ?\n remainingLines[remainingLinesIndex++] : undefined;\n }\n };\n\n // We need to remember the position of \"remainingLines\"\n var lastGeneratedLine = 1, lastGeneratedColumn = 0;\n\n // The generate SourceNodes we need a code range.\n // To extract it current and last mapping is used.\n // Here we store the last mapping.\n var lastMapping = null;\n\n aSourceMapConsumer.eachMapping(function (mapping) {\n if (lastMapping !== null) {\n // We add the code from \"lastMapping\" to \"mapping\":\n // First check if there is a new line in between.\n if (lastGeneratedLine < mapping.generatedLine) {\n // Associate first line with \"lastMapping\"\n addMappingWithCode(lastMapping, shiftNextLine());\n lastGeneratedLine++;\n lastGeneratedColumn = 0;\n // The remaining code is added without mapping\n } else {\n // There is no new line in between.\n // Associate the code between \"lastGeneratedColumn\" and\n // \"mapping.generatedColumn\" with \"lastMapping\"\n var nextLine = remainingLines[remainingLinesIndex] || '';\n var code = nextLine.substr(0, mapping.generatedColumn -\n lastGeneratedColumn);\n remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn -\n lastGeneratedColumn);\n lastGeneratedColumn = mapping.generatedColumn;\n addMappingWithCode(lastMapping, code);\n // No more remaining code, continue\n lastMapping = mapping;\n return;\n }\n }\n // We add the generated code until the first mapping\n // to the SourceNode without any mapping.\n // Each line is added as separate string.\n while (lastGeneratedLine < mapping.generatedLine) {\n node.add(shiftNextLine());\n lastGeneratedLine++;\n }\n if (lastGeneratedColumn < mapping.generatedColumn) {\n var nextLine = remainingLines[remainingLinesIndex] || '';\n node.add(nextLine.substr(0, mapping.generatedColumn));\n remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn);\n lastGeneratedColumn = mapping.generatedColumn;\n }\n lastMapping = mapping;\n }, this);\n // We have processed all mappings.\n if (remainingLinesIndex < remainingLines.length) {\n if (lastMapping) {\n // Associate the remaining code in the current line with \"lastMapping\"\n addMappingWithCode(lastMapping, shiftNextLine());\n }\n // and add the remaining lines without any mapping\n node.add(remainingLines.splice(remainingLinesIndex).join(\"\"));\n }\n\n // Copy sourcesContent into SourceNode\n aSourceMapConsumer.sources.forEach(function (sourceFile) {\n var content = aSourceMapConsumer.sourceContentFor(sourceFile);\n if (content != null) {\n if (aRelativePath != null) {\n sourceFile = 
util.join(aRelativePath, sourceFile);\n }\n node.setSourceContent(sourceFile, content);\n }\n });\n\n return node;\n\n function addMappingWithCode(mapping, code) {\n if (mapping === null || mapping.source === undefined) {\n node.add(code);\n } else {\n var source = aRelativePath\n ? util.join(aRelativePath, mapping.source)\n : mapping.source;\n node.add(new SourceNode(mapping.originalLine,\n mapping.originalColumn,\n source,\n code,\n mapping.name));\n }\n }\n };\n\n/**\n * Add a chunk of generated JS to this source node.\n *\n * @param aChunk A string snippet of generated JS code, another instance of\n * SourceNode, or an array where each member is one of those things.\n */\nSourceNode.prototype.add = function SourceNode_add(aChunk) {\n if (Array.isArray(aChunk)) {\n aChunk.forEach(function (chunk) {\n this.add(chunk);\n }, this);\n }\n else if (aChunk[isSourceNode] || typeof aChunk === \"string\") {\n if (aChunk) {\n this.children.push(aChunk);\n }\n }\n else {\n throw new TypeError(\n \"Expected a SourceNode, string, or an array of SourceNodes and strings. Got \" + aChunk\n );\n }\n return this;\n};\n\n/**\n * Add a chunk of generated JS to the beginning of this source node.\n *\n * @param aChunk A string snippet of generated JS code, another instance of\n * SourceNode, or an array where each member is one of those things.\n */\nSourceNode.prototype.prepend = function SourceNode_prepend(aChunk) {\n if (Array.isArray(aChunk)) {\n for (var i = aChunk.length-1; i >= 0; i--) {\n this.prepend(aChunk[i]);\n }\n }\n else if (aChunk[isSourceNode] || typeof aChunk === \"string\") {\n this.children.unshift(aChunk);\n }\n else {\n throw new TypeError(\n \"Expected a SourceNode, string, or an array of SourceNodes and strings. Got \" + aChunk\n );\n }\n return this;\n};\n\n/**\n * Walk over the tree of JS snippets in this node and its children. The\n * walking function is called once for each snippet of JS and is passed that\n * snippet and the its original associated source's line/column location.\n *\n * @param aFn The traversal function.\n */\nSourceNode.prototype.walk = function SourceNode_walk(aFn) {\n var chunk;\n for (var i = 0, len = this.children.length; i < len; i++) {\n chunk = this.children[i];\n if (chunk[isSourceNode]) {\n chunk.walk(aFn);\n }\n else {\n if (chunk !== '') {\n aFn(chunk, { source: this.source,\n line: this.line,\n column: this.column,\n name: this.name });\n }\n }\n }\n};\n\n/**\n * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between\n * each of `this.children`.\n *\n * @param aSep The separator.\n */\nSourceNode.prototype.join = function SourceNode_join(aSep) {\n var newChildren;\n var i;\n var len = this.children.length;\n if (len > 0) {\n newChildren = [];\n for (i = 0; i < len-1; i++) {\n newChildren.push(this.children[i]);\n newChildren.push(aSep);\n }\n newChildren.push(this.children[i]);\n this.children = newChildren;\n }\n return this;\n};\n\n/**\n * Call String.prototype.replace on the very right-most source snippet. 
Useful\n * for trimming whitespace from the end of a source node, etc.\n *\n * @param aPattern The pattern to replace.\n * @param aReplacement The thing to replace the pattern with.\n */\nSourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) {\n var lastChild = this.children[this.children.length - 1];\n if (lastChild[isSourceNode]) {\n lastChild.replaceRight(aPattern, aReplacement);\n }\n else if (typeof lastChild === 'string') {\n this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement);\n }\n else {\n this.children.push(''.replace(aPattern, aReplacement));\n }\n return this;\n};\n\n/**\n * Set the source content for a source file. This will be added to the SourceMapGenerator\n * in the sourcesContent field.\n *\n * @param aSourceFile The filename of the source file\n * @param aSourceContent The content of the source file\n */\nSourceNode.prototype.setSourceContent =\n function SourceNode_setSourceContent(aSourceFile, aSourceContent) {\n this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent;\n };\n\n/**\n * Walk over the tree of SourceNodes. The walking function is called for each\n * source file content and is passed the filename and source content.\n *\n * @param aFn The traversal function.\n */\nSourceNode.prototype.walkSourceContents =\n function SourceNode_walkSourceContents(aFn) {\n for (var i = 0, len = this.children.length; i < len; i++) {\n if (this.children[i][isSourceNode]) {\n this.children[i].walkSourceContents(aFn);\n }\n }\n\n var sources = Object.keys(this.sourceContents);\n for (var i = 0, len = sources.length; i < len; i++) {\n aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]);\n }\n };\n\n/**\n * Return the string representation of this source node. 
Walks over the tree\n * and concatenates all the various snippets together to one string.\n */\nSourceNode.prototype.toString = function SourceNode_toString() {\n var str = \"\";\n this.walk(function (chunk) {\n str += chunk;\n });\n return str;\n};\n\n/**\n * Returns the string representation of this source node along with a source\n * map.\n */\nSourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) {\n var generated = {\n code: \"\",\n line: 1,\n column: 0\n };\n var map = new SourceMapGenerator(aArgs);\n var sourceMappingActive = false;\n var lastOriginalSource = null;\n var lastOriginalLine = null;\n var lastOriginalColumn = null;\n var lastOriginalName = null;\n this.walk(function (chunk, original) {\n generated.code += chunk;\n if (original.source !== null\n && original.line !== null\n && original.column !== null) {\n if(lastOriginalSource !== original.source\n || lastOriginalLine !== original.line\n || lastOriginalColumn !== original.column\n || lastOriginalName !== original.name) {\n map.addMapping({\n source: original.source,\n original: {\n line: original.line,\n column: original.column\n },\n generated: {\n line: generated.line,\n column: generated.column\n },\n name: original.name\n });\n }\n lastOriginalSource = original.source;\n lastOriginalLine = original.line;\n lastOriginalColumn = original.column;\n lastOriginalName = original.name;\n sourceMappingActive = true;\n } else if (sourceMappingActive) {\n map.addMapping({\n generated: {\n line: generated.line,\n column: generated.column\n }\n });\n lastOriginalSource = null;\n sourceMappingActive = false;\n }\n for (var idx = 0, length = chunk.length; idx < length; idx++) {\n if (chunk.charCodeAt(idx) === NEWLINE_CODE) {\n generated.line++;\n generated.column = 0;\n // Mappings end at eol\n if (idx + 1 === length) {\n lastOriginalSource = null;\n sourceMappingActive = false;\n } else if (sourceMappingActive) {\n map.addMapping({\n source: original.source,\n original: {\n line: original.line,\n column: original.column\n },\n generated: {\n line: generated.line,\n column: generated.column\n },\n name: original.name\n });\n }\n } else {\n generated.column++;\n }\n }\n });\n this.walkSourceContents(function (sourceFile, sourceContent) {\n map.setSourceContent(sourceFile, sourceContent);\n });\n\n return { code: generated.code, map: map };\n};\n\nexports.SourceNode = SourceNode;\n\n\n\n//////////////////\n// WEBPACK FOOTER\n// ./lib/source-node.js\n// module id = 10\n// module chunks = 0"],"sourceRoot":""}
\ No newline at end of file
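The bundled consumer code above documents BasicSourceMapConsumer.generatedPositionFor and the sections-based IndexedSourceMapConsumer. As a rough sketch of how those APIs are used, assuming the synchronous 0.6-style API that this bundle implements and a made-up two-section map (the require path and map contents are illustrative only):

var SourceMapConsumer = require('source-map').SourceMapConsumer;

// Two one-mapping sections; the second section covers generated line 2 onwards.
var indexedMap = {
  version: 3,
  file: 'bundle.js',
  sections: [
    { offset: { line: 0, column: 0 },
      map: { version: 3, sources: ['a.js'], names: [], mappings: 'AAAA' } },
    { offset: { line: 1, column: 0 },
      map: { version: 3, sources: ['b.js'], names: [], mappings: 'AAAA' } }
  ]
};

var consumer = new SourceMapConsumer(indexedMap);

// Generated positions are offset by the section start before delegating to the
// section's own consumer.
console.log(consumer.originalPositionFor({ line: 2, column: 1 }));
// { source: 'b.js', line: 1, column: 0, name: null }

console.log(consumer.generatedPositionFor({ source: 'b.js', line: 1, column: 0 }));
// { line: 2, column: 0 }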
diff --git a/node_modules/source-map/lib/array-set.js b/node_modules/source-map/lib/array-set.js
deleted file mode 100644
index fbd5c81cae66fa6401f871ac7fb02e96fdb9c213..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/array-set.js
+++ /dev/null
@@ -1,121 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-var util = require('./util');
-var has = Object.prototype.hasOwnProperty;
-var hasNativeMap = typeof Map !== "undefined";
-
-/**
- * A data structure which is a combination of an array and a set. Adding a new
- * member is O(1), testing for membership is O(1), and finding the index of an
- * element is O(1). Removing elements from the set is not supported. Only
- * strings are supported for membership.
- */
-function ArraySet() {
- this._array = [];
- this._set = hasNativeMap ? new Map() : Object.create(null);
-}
-
-/**
- * Static method for creating ArraySet instances from an existing array.
- */
-ArraySet.fromArray = function ArraySet_fromArray(aArray, aAllowDuplicates) {
- var set = new ArraySet();
- for (var i = 0, len = aArray.length; i < len; i++) {
- set.add(aArray[i], aAllowDuplicates);
- }
- return set;
-};
-
-/**
- * Return how many unique items are in this ArraySet. If duplicates have been
- * added, then those do not count towards the size.
- *
- * @returns Number
- */
-ArraySet.prototype.size = function ArraySet_size() {
- return hasNativeMap ? this._set.size : Object.getOwnPropertyNames(this._set).length;
-};
-
-/**
- * Add the given string to this set.
- *
- * @param String aStr
- */
-ArraySet.prototype.add = function ArraySet_add(aStr, aAllowDuplicates) {
- var sStr = hasNativeMap ? aStr : util.toSetString(aStr);
- var isDuplicate = hasNativeMap ? this.has(aStr) : has.call(this._set, sStr);
- var idx = this._array.length;
- if (!isDuplicate || aAllowDuplicates) {
- this._array.push(aStr);
- }
- if (!isDuplicate) {
- if (hasNativeMap) {
- this._set.set(aStr, idx);
- } else {
- this._set[sStr] = idx;
- }
- }
-};
-
-/**
- * Is the given string a member of this set?
- *
- * @param String aStr
- */
-ArraySet.prototype.has = function ArraySet_has(aStr) {
- if (hasNativeMap) {
- return this._set.has(aStr);
- } else {
- var sStr = util.toSetString(aStr);
- return has.call(this._set, sStr);
- }
-};
-
-/**
- * What is the index of the given string in the array?
- *
- * @param String aStr
- */
-ArraySet.prototype.indexOf = function ArraySet_indexOf(aStr) {
- if (hasNativeMap) {
- var idx = this._set.get(aStr);
- if (idx >= 0) {
- return idx;
- }
- } else {
- var sStr = util.toSetString(aStr);
- if (has.call(this._set, sStr)) {
- return this._set[sStr];
- }
- }
-
- throw new Error('"' + aStr + '" is not in the set.');
-};
-
-/**
- * What is the element at the given index?
- *
- * @param Number aIdx
- */
-ArraySet.prototype.at = function ArraySet_at(aIdx) {
- if (aIdx >= 0 && aIdx < this._array.length) {
- return this._array[aIdx];
- }
- throw new Error('No element indexed by ' + aIdx);
-};
-
-/**
- * Returns the array representation of this set (which has the proper indices
- * indicated by indexOf). Note that this is a copy of the internal array used
- * for storing the members so that no one can mess with internal state.
- */
-ArraySet.prototype.toArray = function ArraySet_toArray() {
- return this._array.slice();
-};
-
-exports.ArraySet = ArraySet;
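As a quick illustration of the ArraySet API removed above (O(1) add, membership test, and index lookup; only strings as members), a minimal usage sketch; the require path assumes the pre-patch node_modules/source-map/lib layout and is illustrative only:

var ArraySet = require('source-map/lib/array-set').ArraySet;

var set = ArraySet.fromArray(['foo.js', 'bar.js'], /* aAllowDuplicates */ false);
set.add('baz.js');

console.log(set.size());            // 3
console.log(set.has('bar.js'));     // true
console.log(set.indexOf('baz.js')); // 2
console.log(set.at(0));             // 'foo.js'
console.log(set.toArray());         // [ 'foo.js', 'bar.js', 'baz.js' ]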
diff --git a/node_modules/source-map/lib/base64-vlq.js b/node_modules/source-map/lib/base64-vlq.js
deleted file mode 100644
index 612b404018ece911ab71fc0a8db326d16e6b1287..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/base64-vlq.js
+++ /dev/null
@@ -1,140 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- *
- * Based on the Base 64 VLQ implementation in Closure Compiler:
- * https://code.google.com/p/closure-compiler/source/browse/trunk/src/com/google/debugging/sourcemap/Base64VLQ.java
- *
- * Copyright 2011 The Closure Compiler Authors. All rights reserved.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials provided
- * with the distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-var base64 = require('./base64');
-
-// A single base 64 digit can contain 6 bits of data. For the base 64 variable
-// length quantities we use in the source map spec, the first bit is the sign,
-// the next four bits are the actual value, and the 6th bit is the
-// continuation bit. The continuation bit tells us whether there are more
-// digits in this value following this digit.
-//
-// Continuation
-// | Sign
-// | |
-// V V
-// 101011
-
-var VLQ_BASE_SHIFT = 5;
-
-// binary: 100000
-var VLQ_BASE = 1 << VLQ_BASE_SHIFT;
-
-// binary: 011111
-var VLQ_BASE_MASK = VLQ_BASE - 1;
-
-// binary: 100000
-var VLQ_CONTINUATION_BIT = VLQ_BASE;
-
-/**
- * Converts from a two-complement value to a value where the sign bit is
- * placed in the least significant bit. For example, as decimals:
- * 1 becomes 2 (10 binary), -1 becomes 3 (11 binary)
- * 2 becomes 4 (100 binary), -2 becomes 5 (101 binary)
- */
-function toVLQSigned(aValue) {
- return aValue < 0
- ? ((-aValue) << 1) + 1
- : (aValue << 1) + 0;
-}
-
-/**
- * Converts to a two-complement value from a value where the sign bit is
- * placed in the least significant bit. For example, as decimals:
- * 2 (10 binary) becomes 1, 3 (11 binary) becomes -1
- * 4 (100 binary) becomes 2, 5 (101 binary) becomes -2
- */
-function fromVLQSigned(aValue) {
- var isNegative = (aValue & 1) === 1;
- var shifted = aValue >> 1;
- return isNegative
- ? -shifted
- : shifted;
-}
-
-/**
- * Returns the base 64 VLQ encoded value.
- */
-exports.encode = function base64VLQ_encode(aValue) {
- var encoded = "";
- var digit;
-
- var vlq = toVLQSigned(aValue);
-
- do {
- digit = vlq & VLQ_BASE_MASK;
- vlq >>>= VLQ_BASE_SHIFT;
- if (vlq > 0) {
- // There are still more digits in this value, so we must make sure the
- // continuation bit is marked.
- digit |= VLQ_CONTINUATION_BIT;
- }
- encoded += base64.encode(digit);
- } while (vlq > 0);
-
- return encoded;
-};
-
-/**
- * Decodes the next base 64 VLQ value from the given string and returns the
- * value and the rest of the string via the out parameter.
- */
-exports.decode = function base64VLQ_decode(aStr, aIndex, aOutParam) {
- var strLen = aStr.length;
- var result = 0;
- var shift = 0;
- var continuation, digit;
-
- do {
- if (aIndex >= strLen) {
- throw new Error("Expected more digits in base 64 VLQ value.");
- }
-
- digit = base64.decode(aStr.charCodeAt(aIndex++));
- if (digit === -1) {
- throw new Error("Invalid base64 digit: " + aStr.charAt(aIndex - 1));
- }
-
- continuation = !!(digit & VLQ_CONTINUATION_BIT);
- digit &= VLQ_BASE_MASK;
- result = result + (digit << shift);
- shift += VLQ_BASE_SHIFT;
- } while (continuation);
-
- aOutParam.value = fromVLQSigned(result);
- aOutParam.rest = aIndex;
-};
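A worked example of the VLQ scheme described in the comments above (sign in the least-significant bit of the first digit, five data bits per base-64 digit, high bit as the continuation flag); the require path mirrors the removed module layout and is illustrative only:

var base64VLQ = require('source-map/lib/base64-vlq');

console.log(base64VLQ.encode(0));   // 'A'  (0  -> 0 -> 000000)
console.log(base64VLQ.encode(1));   // 'C'  (1  -> 2 -> 000010)
console.log(base64VLQ.encode(-1));  // 'D'  (-1 -> 3 -> 000011)
console.log(base64VLQ.encode(16));  // 'gB' (16 -> 32, needs a continuation digit)

var out = {};
base64VLQ.decode('gB', 0, out);
console.log(out.value, out.rest);   // 16 2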
diff --git a/node_modules/source-map/lib/base64.js b/node_modules/source-map/lib/base64.js
deleted file mode 100644
index 8aa86b30264363990334a7df0aa0d0c9cc1aecfc..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/base64.js
+++ /dev/null
@@ -1,67 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-var intToCharMap = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'.split('');
-
-/**
- * Encode an integer in the range of 0 to 63 to a single base 64 digit.
- */
-exports.encode = function (number) {
- if (0 <= number && number < intToCharMap.length) {
- return intToCharMap[number];
- }
- throw new TypeError("Must be between 0 and 63: " + number);
-};
-
-/**
- * Decode a single base 64 character code digit to an integer. Returns -1 on
- * failure.
- */
-exports.decode = function (charCode) {
- var bigA = 65; // 'A'
- var bigZ = 90; // 'Z'
-
- var littleA = 97; // 'a'
- var littleZ = 122; // 'z'
-
- var zero = 48; // '0'
- var nine = 57; // '9'
-
- var plus = 43; // '+'
- var slash = 47; // '/'
-
- var littleOffset = 26;
- var numberOffset = 52;
-
- // 0 - 25: ABCDEFGHIJKLMNOPQRSTUVWXYZ
- if (bigA <= charCode && charCode <= bigZ) {
- return (charCode - bigA);
- }
-
- // 26 - 51: abcdefghijklmnopqrstuvwxyz
- if (littleA <= charCode && charCode <= littleZ) {
- return (charCode - littleA + littleOffset);
- }
-
- // 52 - 61: 0123456789
- if (zero <= charCode && charCode <= nine) {
- return (charCode - zero + numberOffset);
- }
-
- // 62: +
- if (charCode == plus) {
- return 62;
- }
-
- // 63: /
- if (charCode == slash) {
- return 63;
- }
-
- // Invalid base64 digit.
- return -1;
-};
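A small sketch of the digit mapping documented above (again, the require path is illustrative):

var base64 = require('source-map/lib/base64');

console.log(base64.encode(0));                  // 'A'
console.log(base64.encode(26));                 // 'a'
console.log(base64.encode(63));                 // '/'
console.log(base64.decode('a'.charCodeAt(0)));  // 26
console.log(base64.decode('*'.charCodeAt(0)));  // -1 (not a base-64 digit)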
diff --git a/node_modules/source-map/lib/binary-search.js b/node_modules/source-map/lib/binary-search.js
deleted file mode 100644
index 010ac941e1568d59c89b67cb649051a14608ee79..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/binary-search.js
+++ /dev/null
@@ -1,111 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-exports.GREATEST_LOWER_BOUND = 1;
-exports.LEAST_UPPER_BOUND = 2;
-
-/**
- * Recursive implementation of binary search.
- *
- * @param aLow Indices here and lower do not contain the needle.
- * @param aHigh Indices here and higher do not contain the needle.
- * @param aNeedle The element being searched for.
- * @param aHaystack The non-empty array being searched.
- * @param aCompare Function which takes two elements and returns -1, 0, or 1.
- * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or
- * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the
- * closest element that is smaller than or greater than the one we are
- * searching for, respectively, if the exact element cannot be found.
- */
-function recursiveSearch(aLow, aHigh, aNeedle, aHaystack, aCompare, aBias) {
- // This function terminates when one of the following is true:
- //
- // 1. We find the exact element we are looking for.
- //
- // 2. We did not find the exact element, but we can return the index of
- // the next-closest element.
- //
- // 3. We did not find the exact element, and there is no next-closest
- // element, so we return -1.
- var mid = Math.floor((aHigh - aLow) / 2) + aLow;
- var cmp = aCompare(aNeedle, aHaystack[mid], true);
- if (cmp === 0) {
- // Found the element we are looking for.
- return mid;
- }
- else if (cmp > 0) {
- // Our needle is greater than aHaystack[mid].
- if (aHigh - mid > 1) {
- // The element is in the upper half.
- return recursiveSearch(mid, aHigh, aNeedle, aHaystack, aCompare, aBias);
- }
-
- // The exact needle element was not found in this haystack. Determine if
- // we are in termination case (3) or (2) and return the appropriate thing.
- if (aBias == exports.LEAST_UPPER_BOUND) {
- return aHigh < aHaystack.length ? aHigh : -1;
- } else {
- return mid;
- }
- }
- else {
- // Our needle is less than aHaystack[mid].
- if (mid - aLow > 1) {
- // The element is in the lower half.
- return recursiveSearch(aLow, mid, aNeedle, aHaystack, aCompare, aBias);
- }
-
- // we are in termination case (3) or (2) and return the appropriate thing.
- if (aBias == exports.LEAST_UPPER_BOUND) {
- return mid;
- } else {
- return aLow < 0 ? -1 : aLow;
- }
- }
-}
-
-/**
- * This is an implementation of binary search which will always try and return
- * the index of the closest element if there is no exact hit. This is because
- * mappings between original and generated line/col pairs are single points,
- * and there is an implicit region between each of them, so a miss just means
- * that you aren't on the very start of a region.
- *
- * @param aNeedle The element you are looking for.
- * @param aHaystack The array that is being searched.
- * @param aCompare A function which takes the needle and an element in the
- * array and returns -1, 0, or 1 depending on whether the needle is less
- * than, equal to, or greater than the element, respectively.
- * @param aBias Either 'binarySearch.GREATEST_LOWER_BOUND' or
- * 'binarySearch.LEAST_UPPER_BOUND'. Specifies whether to return the
- * closest element that is smaller than or greater than the one we are
- * searching for, respectively, if the exact element cannot be found.
- * Defaults to 'binarySearch.GREATEST_LOWER_BOUND'.
- */
-exports.search = function search(aNeedle, aHaystack, aCompare, aBias) {
- if (aHaystack.length === 0) {
- return -1;
- }
-
- var index = recursiveSearch(-1, aHaystack.length, aNeedle, aHaystack,
- aCompare, aBias || exports.GREATEST_LOWER_BOUND);
- if (index < 0) {
- return -1;
- }
-
- // We have found either the exact element, or the next-closest element than
- // the one we are searching for. However, there may be more than one such
- // element. Make sure we always return the smallest of these.
- while (index - 1 >= 0) {
- if (aCompare(aHaystack[index], aHaystack[index - 1], true) !== 0) {
- break;
- }
- --index;
- }
-
- return index;
-};
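A sketch of how the two bias constants behave on an inexact hit, using a plain numeric haystack and comparator (both made up for illustration; the require path assumes the removed layout):

var binarySearch = require('source-map/lib/binary-search');

var haystack = [2, 4, 6, 8];
function cmp(needle, element) { return needle - element; }

console.log(binarySearch.search(6, haystack, cmp));
// 2 (exact hit)
console.log(binarySearch.search(5, haystack, cmp, binarySearch.GREATEST_LOWER_BOUND));
// 1 -> haystack[1] === 4, the closest element below the needle
console.log(binarySearch.search(5, haystack, cmp, binarySearch.LEAST_UPPER_BOUND));
// 2 -> haystack[2] === 6, the closest element above the needle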
diff --git a/node_modules/source-map/lib/mapping-list.js b/node_modules/source-map/lib/mapping-list.js
deleted file mode 100644
index 06d1274a025a8a30879f31c9c6703a14f79f73b9..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/mapping-list.js
+++ /dev/null
@@ -1,79 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2014 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-var util = require('./util');
-
-/**
- * Determine whether mappingB is after mappingA with respect to generated
- * position.
- */
-function generatedPositionAfter(mappingA, mappingB) {
- // Optimized for the most common case
- var lineA = mappingA.generatedLine;
- var lineB = mappingB.generatedLine;
- var columnA = mappingA.generatedColumn;
- var columnB = mappingB.generatedColumn;
- return lineB > lineA || lineB == lineA && columnB >= columnA ||
- util.compareByGeneratedPositionsInflated(mappingA, mappingB) <= 0;
-}
-
-/**
- * A data structure to provide a sorted view of accumulated mappings in a
- * performance-conscious manner. It trades a negligible overhead in the general
- * case for a large speedup when mappings are added in order.
- */
-function MappingList() {
- this._array = [];
- this._sorted = true;
- // Serves as infimum
- this._last = {generatedLine: -1, generatedColumn: 0};
-}
-
-/**
- * Iterate through internal items. This method takes the same arguments that
- * `Array.prototype.forEach` takes.
- *
- * NOTE: The order of the mappings is NOT guaranteed.
- */
-MappingList.prototype.unsortedForEach =
- function MappingList_forEach(aCallback, aThisArg) {
- this._array.forEach(aCallback, aThisArg);
- };
-
-/**
- * Add the given source mapping.
- *
- * @param Object aMapping
- */
-MappingList.prototype.add = function MappingList_add(aMapping) {
- if (generatedPositionAfter(this._last, aMapping)) {
- this._last = aMapping;
- this._array.push(aMapping);
- } else {
- this._sorted = false;
- this._array.push(aMapping);
- }
-};
-
-/**
- * Returns the flat, sorted array of mappings. The mappings are sorted by
- * generated position.
- *
- * WARNING: This method returns internal data without copying, for
- * performance. The return value must NOT be mutated, and should be treated as
- * an immutable borrow. If you want to take ownership, you must make your own
- * copy.
- */
-MappingList.prototype.toArray = function MappingList_toArray() {
- if (!this._sorted) {
- this._array.sort(util.compareByGeneratedPositionsInflated);
- this._sorted = true;
- }
- return this._array;
-};
-
-exports.MappingList = MappingList;
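A sketch of the fast path described above: adds that arrive in generated order keep the list sorted, and a single out-of-order add defers sorting to toArray(). The helper and require path are illustrative assumptions:

var MappingList = require('source-map/lib/mapping-list').MappingList;

function mapping(line, column) {
  return { generatedLine: line, generatedColumn: column,
           source: null, originalLine: null, originalColumn: null, name: null };
}

var list = new MappingList();
list.add(mapping(1, 0));   // in order: appended without marking the list unsorted
list.add(mapping(2, 4));   // still in order
list.add(mapping(1, 7));   // out of order: the list is flagged for a later sort

// toArray() sorts lazily, only because of the out-of-order add above.
console.log(list.toArray().map(function (m) {
  return m.generatedLine + ':' + m.generatedColumn;
}));
// [ '1:0', '1:7', '2:4' ]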
diff --git a/node_modules/source-map/lib/quick-sort.js b/node_modules/source-map/lib/quick-sort.js
deleted file mode 100644
index 6a7caadbbdbea1865cfb947cb21fbf0c8da1289a..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/quick-sort.js
+++ /dev/null
@@ -1,114 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-// It turns out that some (most?) JavaScript engines don't self-host
-// `Array.prototype.sort`. This makes sense because C++ will likely remain
-// faster than JS when doing raw CPU-intensive sorting. However, when using a
-// custom comparator function, calling back and forth between the VM's C++ and
-// JIT'd JS is rather slow *and* loses JIT type information, resulting in
-// worse generated code for the comparator function than would be optimal. In
-// fact, when sorting with a comparator, these costs outweigh the benefits of
-// sorting in C++. By using our own JS-implemented Quick Sort (below), we get
-// a ~3500ms mean speed-up in `bench/bench.html`.
-
-/**
- * Swap the elements indexed by `x` and `y` in the array `ary`.
- *
- * @param {Array} ary
- * The array.
- * @param {Number} x
- * The index of the first item.
- * @param {Number} y
- * The index of the second item.
- */
-function swap(ary, x, y) {
- var temp = ary[x];
- ary[x] = ary[y];
- ary[y] = temp;
-}
-
-/**
- * Returns a random integer within the range `low .. high` inclusive.
- *
- * @param {Number} low
- * The lower bound on the range.
- * @param {Number} high
- * The upper bound on the range.
- */
-function randomIntInRange(low, high) {
- return Math.round(low + (Math.random() * (high - low)));
-}
-
-/**
- * The Quick Sort algorithm.
- *
- * @param {Array} ary
- * An array to sort.
- * @param {function} comparator
- * Function to use to compare two items.
- * @param {Number} p
- * Start index of the array
- * @param {Number} r
- * End index of the array
- */
-function doQuickSort(ary, comparator, p, r) {
- // If our lower bound is less than our upper bound, we (1) partition the
- // array into two pieces and (2) recurse on each half. If it is not, this is
- // the empty array and our base case.
-
- if (p < r) {
- // (1) Partitioning.
- //
- // The partitioning chooses a pivot between `p` and `r` and moves all
- // elements that are less than or equal to the pivot before it, and
- // all the elements that are greater than it after it. The effect is that
- // once partition is done, the pivot is in the exact place it will be when
- // the array is put in sorted order, and it will not need to be moved
- // again. This runs in O(n) time.
-
- // Always choose a random pivot so that an input array which is reverse
- // sorted does not cause O(n^2) running time.
- var pivotIndex = randomIntInRange(p, r);
- var i = p - 1;
-
- swap(ary, pivotIndex, r);
- var pivot = ary[r];
-
- // Immediately after `j` is incremented in this loop, the following hold
- // true:
- //
- // * Every element in `ary[p .. i]` is less than or equal to the pivot.
- //
- // * Every element in `ary[i+1 .. j-1]` is greater than the pivot.
- for (var j = p; j < r; j++) {
- if (comparator(ary[j], pivot) <= 0) {
- i += 1;
- swap(ary, i, j);
- }
- }
-
- swap(ary, i + 1, j);
- var q = i + 1;
-
- // (2) Recurse on each half.
-
- doQuickSort(ary, comparator, p, q - 1);
- doQuickSort(ary, comparator, q + 1, r);
- }
-}
-
-/**
- * Sort the given array in-place with the given comparator function.
- *
- * @param {Array} ary
- * An array to sort.
- * @param {function} comparator
- * Function to use to compare two items.
- */
-exports.quickSort = function (ary, comparator) {
- doQuickSort(ary, comparator, 0, ary.length - 1);
-};
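A minimal use of the exported quickSort with a mapping-style comparator (the data and require path are illustrative only):

var quickSort = require('source-map/lib/quick-sort').quickSort;

var mappings = [
  { generatedLine: 2, generatedColumn: 1 },
  { generatedLine: 1, generatedColumn: 5 },
  { generatedLine: 1, generatedColumn: 0 }
];

// Sorts in place, by generated line and then generated column.
quickSort(mappings, function (a, b) {
  return a.generatedLine - b.generatedLine ||
         a.generatedColumn - b.generatedColumn;
});

console.log(mappings.map(function (m) {
  return m.generatedLine + ':' + m.generatedColumn;
}));
// [ '1:0', '1:5', '2:1' ]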
diff --git a/node_modules/source-map/lib/source-map-consumer.js b/node_modules/source-map/lib/source-map-consumer.js
deleted file mode 100644
index 7b99d1da7feac7c5345af8945ed047c7c054c0ba..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/source-map-consumer.js
+++ /dev/null
@@ -1,1145 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-var util = require('./util');
-var binarySearch = require('./binary-search');
-var ArraySet = require('./array-set').ArraySet;
-var base64VLQ = require('./base64-vlq');
-var quickSort = require('./quick-sort').quickSort;
-
-function SourceMapConsumer(aSourceMap, aSourceMapURL) {
- var sourceMap = aSourceMap;
- if (typeof aSourceMap === 'string') {
- sourceMap = util.parseSourceMapInput(aSourceMap);
- }
-
- return sourceMap.sections != null
- ? new IndexedSourceMapConsumer(sourceMap, aSourceMapURL)
- : new BasicSourceMapConsumer(sourceMap, aSourceMapURL);
-}
-
-SourceMapConsumer.fromSourceMap = function(aSourceMap, aSourceMapURL) {
- return BasicSourceMapConsumer.fromSourceMap(aSourceMap, aSourceMapURL);
-}
-
-/**
- * The version of the source mapping spec that we are consuming.
- */
-SourceMapConsumer.prototype._version = 3;
-
-// `__generatedMappings` and `__originalMappings` are arrays that hold the
-// parsed mapping coordinates from the source map's "mappings" attribute. They
-// are lazily instantiated, accessed via the `_generatedMappings` and
-// `_originalMappings` getters respectively, and we only parse the mappings
-// and create these arrays once queried for a source location. We jump through
-// these hoops because there can be many thousands of mappings, and parsing
-// them is expensive, so we only want to do it if we must.
-//
-// Each object in the arrays is of the form:
-//
-// {
-// generatedLine: The line number in the generated code,
-// generatedColumn: The column number in the generated code,
-// source: The path to the original source file that generated this
-// chunk of code,
-// originalLine: The line number in the original source that
-// corresponds to this chunk of generated code,
-// originalColumn: The column number in the original source that
-// corresponds to this chunk of generated code,
-// name: The name of the original symbol which generated this chunk of
-// code.
-// }
-//
-// All properties except for `generatedLine` and `generatedColumn` can be
-// `null`.
-//
-// `_generatedMappings` is ordered by the generated positions.
-//
-// `_originalMappings` is ordered by the original positions.
-
-SourceMapConsumer.prototype.__generatedMappings = null;
-Object.defineProperty(SourceMapConsumer.prototype, '_generatedMappings', {
- configurable: true,
- enumerable: true,
- get: function () {
- if (!this.__generatedMappings) {
- this._parseMappings(this._mappings, this.sourceRoot);
- }
-
- return this.__generatedMappings;
- }
-});
-
-SourceMapConsumer.prototype.__originalMappings = null;
-Object.defineProperty(SourceMapConsumer.prototype, '_originalMappings', {
- configurable: true,
- enumerable: true,
- get: function () {
- if (!this.__originalMappings) {
- this._parseMappings(this._mappings, this.sourceRoot);
- }
-
- return this.__originalMappings;
- }
-});
-
-SourceMapConsumer.prototype._charIsMappingSeparator =
- function SourceMapConsumer_charIsMappingSeparator(aStr, index) {
- var c = aStr.charAt(index);
- return c === ";" || c === ",";
- };
-
-/**
- * Parse the mappings in a string in to a data structure which we can easily
- * query (the ordered arrays in the `this.__generatedMappings` and
- * `this.__originalMappings` properties).
- */
-SourceMapConsumer.prototype._parseMappings =
- function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {
- throw new Error("Subclasses must implement _parseMappings");
- };
-
-SourceMapConsumer.GENERATED_ORDER = 1;
-SourceMapConsumer.ORIGINAL_ORDER = 2;
-
-SourceMapConsumer.GREATEST_LOWER_BOUND = 1;
-SourceMapConsumer.LEAST_UPPER_BOUND = 2;
-
-/**
- * Iterate over each mapping between an original source/line/column and a
- * generated line/column in this source map.
- *
- * @param Function aCallback
- * The function that is called with each mapping.
- * @param Object aContext
- * Optional. If specified, this object will be the value of `this` every
- * time that `aCallback` is called.
- * @param aOrder
- * Either `SourceMapConsumer.GENERATED_ORDER` or
- * `SourceMapConsumer.ORIGINAL_ORDER`. Specifies whether you want to
- * iterate over the mappings sorted by the generated file's line/column
- * order or the original's source/line/column order, respectively. Defaults to
- * `SourceMapConsumer.GENERATED_ORDER`.
- */
-SourceMapConsumer.prototype.eachMapping =
- function SourceMapConsumer_eachMapping(aCallback, aContext, aOrder) {
- var context = aContext || null;
- var order = aOrder || SourceMapConsumer.GENERATED_ORDER;
-
- var mappings;
- switch (order) {
- case SourceMapConsumer.GENERATED_ORDER:
- mappings = this._generatedMappings;
- break;
- case SourceMapConsumer.ORIGINAL_ORDER:
- mappings = this._originalMappings;
- break;
- default:
- throw new Error("Unknown order of iteration.");
- }
-
- var sourceRoot = this.sourceRoot;
- mappings.map(function (mapping) {
- var source = mapping.source === null ? null : this._sources.at(mapping.source);
- source = util.computeSourceURL(sourceRoot, source, this._sourceMapURL);
- return {
- source: source,
- generatedLine: mapping.generatedLine,
- generatedColumn: mapping.generatedColumn,
- originalLine: mapping.originalLine,
- originalColumn: mapping.originalColumn,
- name: mapping.name === null ? null : this._names.at(mapping.name)
- };
- }, this).forEach(aCallback, context);
- };
-
-/**
- * Returns all generated line and column information for the original source,
- * line, and column provided. If no column is provided, returns all mappings
- * corresponding to either the line we are searching for or the next
- * closest line that has any mappings. Otherwise, returns all mappings
- * corresponding to the given line and either the column we are searching for
- * or the next closest column that has any offsets.
- *
- * The only argument is an object with the following properties:
- *
- * - source: The filename of the original source.
- * - line: The line number in the original source. The line number is 1-based.
- * - column: Optional. the column number in the original source.
- * The column number is 0-based.
- *
- * and an array of objects is returned, each with the following properties:
- *
- * - line: The line number in the generated source, or null. The
- * line number is 1-based.
- * - column: The column number in the generated source, or null.
- * The column number is 0-based.
- */
-SourceMapConsumer.prototype.allGeneratedPositionsFor =
- function SourceMapConsumer_allGeneratedPositionsFor(aArgs) {
- var line = util.getArg(aArgs, 'line');
-
- // When there is no exact match, BasicSourceMapConsumer.prototype._findMapping
- // returns the index of the closest mapping less than the needle. By
- // setting needle.originalColumn to 0, we thus find the last mapping for
- // the given line, provided such a mapping exists.
- var needle = {
- source: util.getArg(aArgs, 'source'),
- originalLine: line,
- originalColumn: util.getArg(aArgs, 'column', 0)
- };
-
- needle.source = this._findSourceIndex(needle.source);
- if (needle.source < 0) {
- return [];
- }
-
- var mappings = [];
-
- var index = this._findMapping(needle,
- this._originalMappings,
- "originalLine",
- "originalColumn",
- util.compareByOriginalPositions,
- binarySearch.LEAST_UPPER_BOUND);
- if (index >= 0) {
- var mapping = this._originalMappings[index];
-
- if (aArgs.column === undefined) {
- var originalLine = mapping.originalLine;
-
- // Iterate until either we run out of mappings, or we run into
- // a mapping for a different line than the one we found. Since
- // mappings are sorted, this is guaranteed to find all mappings for
- // the line we found.
- while (mapping && mapping.originalLine === originalLine) {
- mappings.push({
- line: util.getArg(mapping, 'generatedLine', null),
- column: util.getArg(mapping, 'generatedColumn', null),
- lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)
- });
-
- mapping = this._originalMappings[++index];
- }
- } else {
- var originalColumn = mapping.originalColumn;
-
- // Iterate until either we run out of mappings, or we run into
- // a mapping for a different line than the one we were searching for.
- // Since mappings are sorted, this is guaranteed to find all mappings for
- // the line we are searching for.
- while (mapping &&
- mapping.originalLine === line &&
- mapping.originalColumn == originalColumn) {
- mappings.push({
- line: util.getArg(mapping, 'generatedLine', null),
- column: util.getArg(mapping, 'generatedColumn', null),
- lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)
- });
-
- mapping = this._originalMappings[++index];
- }
- }
- }
-
- return mappings;
- };
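-
-/*
- * Editor's sketch (not part of the original file): one way this method might
- * be called, assuming the synchronous 0.6-style API defined in this file.
- * The rawSourceMap object, file name, and positions below are hypothetical.
- *
- *   var SourceMapConsumer = require('source-map').SourceMapConsumer;
- *   var consumer = new SourceMapConsumer(rawSourceMap);
- *
- *   // All generated positions that map back to line 2 of "foo.js".
- *   // Omitting `column` returns every mapping on that original line.
- *   var positions = consumer.allGeneratedPositionsFor({
- *     source: 'foo.js',
- *     line: 2
- *   });
- *   // e.g. [{ line: 1, column: 12, lastColumn: null }, ...]
- *   // (lastColumn stays null until computeColumnSpans() has been called.)
- */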
-
-exports.SourceMapConsumer = SourceMapConsumer;
-
-/**
- * A BasicSourceMapConsumer instance represents a parsed source map which we can
- * query for information about the original file positions by giving it a file
- * position in the generated source.
- *
- * The first parameter is the raw source map (either as a JSON string, or
- * already parsed to an object). According to the spec, source maps have the
- * following attributes:
- *
- * - version: Which version of the source map spec this map is following.
- * - sources: An array of URLs to the original source files.
- * - names: An array of identifiers which can be referenced by individual mappings.
- * - sourceRoot: Optional. The URL root from which all sources are relative.
- * - sourcesContent: Optional. An array of contents of the original source files.
- * - mappings: A string of base64 VLQs which contain the actual mappings.
- * - file: Optional. The generated file this source map is associated with.
- *
- * Here is an example source map, taken from the source map spec[0]:
- *
- * {
- * version : 3,
- * file: "out.js",
- * sourceRoot : "",
- * sources: ["foo.js", "bar.js"],
- * names: ["src", "maps", "are", "fun"],
- * mappings: "AA,AB;;ABCDE;"
- * }
- *
- * The second parameter, if given, is a string whose value is the URL
- * at which the source map was found. This URL is used to compute the
- * sources array.
- *
- * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit?pli=1#
- */
-function BasicSourceMapConsumer(aSourceMap, aSourceMapURL) {
- var sourceMap = aSourceMap;
- if (typeof aSourceMap === 'string') {
- sourceMap = util.parseSourceMapInput(aSourceMap);
- }
-
- var version = util.getArg(sourceMap, 'version');
- var sources = util.getArg(sourceMap, 'sources');
- // Sass 3.3 leaves out the 'names' array, so we deviate from the spec (which
- // requires the array) to play nice here.
- var names = util.getArg(sourceMap, 'names', []);
- var sourceRoot = util.getArg(sourceMap, 'sourceRoot', null);
- var sourcesContent = util.getArg(sourceMap, 'sourcesContent', null);
- var mappings = util.getArg(sourceMap, 'mappings');
- var file = util.getArg(sourceMap, 'file', null);
-
- // Once again, Sass deviates from the spec and supplies the version as a
- // string rather than a number, so we use loose equality checking here.
- if (version != this._version) {
- throw new Error('Unsupported version: ' + version);
- }
-
- if (sourceRoot) {
- sourceRoot = util.normalize(sourceRoot);
- }
-
- sources = sources
- .map(String)
- // Some source maps produce relative source paths like "./foo.js" instead of
- // "foo.js". Normalize these first so that future comparisons will succeed.
- // See bugzil.la/1090768.
- .map(util.normalize)
- // Always ensure that absolute sources are internally stored relative to
- // the source root, if the source root is absolute. Not doing this would
- // be particularly problematic when the source root is a prefix of the
- // source (valid, but why??). See github issue #199 and bugzil.la/1188982.
- .map(function (source) {
- return sourceRoot && util.isAbsolute(sourceRoot) && util.isAbsolute(source)
- ? util.relative(sourceRoot, source)
- : source;
- });
-
- // Pass `true` below to allow duplicate names and sources. While source maps
- // are intended to be compressed and deduplicated, the TypeScript compiler
- // sometimes generates source maps with duplicates in them. See Github issue
- // #72 and bugzil.la/889492.
- this._names = ArraySet.fromArray(names.map(String), true);
- this._sources = ArraySet.fromArray(sources, true);
-
- this._absoluteSources = this._sources.toArray().map(function (s) {
- return util.computeSourceURL(sourceRoot, s, aSourceMapURL);
- });
-
- this.sourceRoot = sourceRoot;
- this.sourcesContent = sourcesContent;
- this._mappings = mappings;
- this._sourceMapURL = aSourceMapURL;
- this.file = file;
-}
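-
-/*
- * Editor's sketch (not part of the original file): constructing a consumer
- * from a raw map, assuming the synchronous 0.6-style API defined in this
- * file. The map contents and the source map URL are hypothetical.
- *
- *   var SourceMapConsumer = require('source-map').SourceMapConsumer;
- *   var rawSourceMap = {
- *     version: 3,
- *     file: 'out.js',
- *     sources: ['foo.js'],
- *     names: [],
- *     mappings: 'AAAA',
- *     sourcesContent: ['var x = 1;\n']
- *   };
- *   var consumer = new SourceMapConsumer(rawSourceMap,
- *                                        'http://example.com/out.js.map');
- *   consumer.sources; // ['http://example.com/foo.js']
- */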
-
-BasicSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);
-BasicSourceMapConsumer.prototype.consumer = SourceMapConsumer;
-
-/**
- * Utility function to find the index of a source. Returns -1 if not
- * found.
- */
-BasicSourceMapConsumer.prototype._findSourceIndex = function(aSource) {
- var relativeSource = aSource;
- if (this.sourceRoot != null) {
- relativeSource = util.relative(this.sourceRoot, relativeSource);
- }
-
- if (this._sources.has(relativeSource)) {
- return this._sources.indexOf(relativeSource);
- }
-
- // Maybe aSource is an absolute URL as returned by |sources|. In
- // this case we can't simply undo the transform.
- var i;
- for (i = 0; i < this._absoluteSources.length; ++i) {
- if (this._absoluteSources[i] == aSource) {
- return i;
- }
- }
-
- return -1;
-};
-
-/**
- * Create a BasicSourceMapConsumer from a SourceMapGenerator.
- *
- * @param SourceMapGenerator aSourceMap
- * The source map that will be consumed.
- * @param String aSourceMapURL
- * The URL at which the source map can be found (optional)
- * @returns BasicSourceMapConsumer
- */
-BasicSourceMapConsumer.fromSourceMap =
- function SourceMapConsumer_fromSourceMap(aSourceMap, aSourceMapURL) {
- var smc = Object.create(BasicSourceMapConsumer.prototype);
-
- var names = smc._names = ArraySet.fromArray(aSourceMap._names.toArray(), true);
- var sources = smc._sources = ArraySet.fromArray(aSourceMap._sources.toArray(), true);
- smc.sourceRoot = aSourceMap._sourceRoot;
- smc.sourcesContent = aSourceMap._generateSourcesContent(smc._sources.toArray(),
- smc.sourceRoot);
- smc.file = aSourceMap._file;
- smc._sourceMapURL = aSourceMapURL;
- smc._absoluteSources = smc._sources.toArray().map(function (s) {
- return util.computeSourceURL(smc.sourceRoot, s, aSourceMapURL);
- });
-
- // Because we are modifying the entries (by converting string sources and
- // names to indices into the sources and names ArraySets), we have to make
- // a copy of the entry or else bad things happen. Shared mutable state
- // strikes again! See github issue #191.
-
- var generatedMappings = aSourceMap._mappings.toArray().slice();
- var destGeneratedMappings = smc.__generatedMappings = [];
- var destOriginalMappings = smc.__originalMappings = [];
-
- for (var i = 0, length = generatedMappings.length; i < length; i++) {
- var srcMapping = generatedMappings[i];
- var destMapping = new Mapping;
- destMapping.generatedLine = srcMapping.generatedLine;
- destMapping.generatedColumn = srcMapping.generatedColumn;
-
- if (srcMapping.source) {
- destMapping.source = sources.indexOf(srcMapping.source);
- destMapping.originalLine = srcMapping.originalLine;
- destMapping.originalColumn = srcMapping.originalColumn;
-
- if (srcMapping.name) {
- destMapping.name = names.indexOf(srcMapping.name);
- }
-
- destOriginalMappings.push(destMapping);
- }
-
- destGeneratedMappings.push(destMapping);
- }
-
- quickSort(smc.__originalMappings, util.compareByOriginalPositions);
-
- return smc;
- };
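-
-/*
- * Editor's sketch (not part of the original file): turning a generator back
- * into a consumer without serializing to JSON first, assuming the
- * fromSourceMap helper is reachable from the exported SourceMapConsumer as
- * in the 0.6-style API. The mapping shown is hypothetical.
- *
- *   var sourceMap = require('source-map');
- *   var generator = new sourceMap.SourceMapGenerator({ file: 'out.js' });
- *   generator.addMapping({
- *     generated: { line: 1, column: 0 },
- *     original: { line: 1, column: 0 },
- *     source: 'foo.js'
- *   });
- *   var consumer = sourceMap.SourceMapConsumer.fromSourceMap(generator);
- */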
-
-/**
- * The version of the source mapping spec that we are consuming.
- */
-BasicSourceMapConsumer.prototype._version = 3;
-
-/**
- * The list of original sources.
- */
-Object.defineProperty(BasicSourceMapConsumer.prototype, 'sources', {
- get: function () {
- return this._absoluteSources.slice();
- }
-});
-
-/**
- * Provide the JIT with a nice shape / hidden class.
- */
-function Mapping() {
- this.generatedLine = 0;
- this.generatedColumn = 0;
- this.source = null;
- this.originalLine = null;
- this.originalColumn = null;
- this.name = null;
-}
-
-/**
- * Parse the mappings in a string into a data structure which we can easily
- * query (the ordered arrays in the `this.__generatedMappings` and
- * `this.__originalMappings` properties).
- */
-BasicSourceMapConsumer.prototype._parseMappings =
- function SourceMapConsumer_parseMappings(aStr, aSourceRoot) {
- var generatedLine = 1;
- var previousGeneratedColumn = 0;
- var previousOriginalLine = 0;
- var previousOriginalColumn = 0;
- var previousSource = 0;
- var previousName = 0;
- var length = aStr.length;
- var index = 0;
- var cachedSegments = {};
- var temp = {};
- var originalMappings = [];
- var generatedMappings = [];
- var mapping, str, segment, end, value;
-
- while (index < length) {
- if (aStr.charAt(index) === ';') {
- generatedLine++;
- index++;
- previousGeneratedColumn = 0;
- }
- else if (aStr.charAt(index) === ',') {
- index++;
- }
- else {
- mapping = new Mapping();
- mapping.generatedLine = generatedLine;
-
- // Because each offset is encoded relative to the previous one,
- // many segments often have the same encoding. We can exploit this
- // fact by caching the parsed variable length fields of each segment,
- // allowing us to avoid a second parse if we encounter the same
- // segment again.
- for (end = index; end < length; end++) {
- if (this._charIsMappingSeparator(aStr, end)) {
- break;
- }
- }
- str = aStr.slice(index, end);
-
- segment = cachedSegments[str];
- if (segment) {
- index += str.length;
- } else {
- segment = [];
- while (index < end) {
- base64VLQ.decode(aStr, index, temp);
- value = temp.value;
- index = temp.rest;
- segment.push(value);
- }
-
- if (segment.length === 2) {
- throw new Error('Found a source, but no line and column');
- }
-
- if (segment.length === 3) {
- throw new Error('Found a source and line, but no column');
- }
-
- cachedSegments[str] = segment;
- }
-
- // Generated column.
- mapping.generatedColumn = previousGeneratedColumn + segment[0];
- previousGeneratedColumn = mapping.generatedColumn;
-
- if (segment.length > 1) {
- // Original source.
- mapping.source = previousSource + segment[1];
- previousSource += segment[1];
-
- // Original line.
- mapping.originalLine = previousOriginalLine + segment[2];
- previousOriginalLine = mapping.originalLine;
- // Lines are stored 0-based
- mapping.originalLine += 1;
-
- // Original column.
- mapping.originalColumn = previousOriginalColumn + segment[3];
- previousOriginalColumn = mapping.originalColumn;
-
- if (segment.length > 4) {
- // Original name.
- mapping.name = previousName + segment[4];
- previousName += segment[4];
- }
- }
-
- generatedMappings.push(mapping);
- if (typeof mapping.originalLine === 'number') {
- originalMappings.push(mapping);
- }
- }
- }
-
- quickSort(generatedMappings, util.compareByGeneratedPositionsDeflated);
- this.__generatedMappings = generatedMappings;
-
- quickSort(originalMappings, util.compareByOriginalPositions);
- this.__originalMappings = originalMappings;
- };
-
-/**
- * Find the mapping that best matches the hypothetical "needle" mapping that
- * we are searching for in the given "haystack" of mappings.
- */
-BasicSourceMapConsumer.prototype._findMapping =
- function SourceMapConsumer_findMapping(aNeedle, aMappings, aLineName,
- aColumnName, aComparator, aBias) {
- // To return the position we are searching for, we must first find the
- // mapping for the given position and then return the opposite position it
- // points to. Because the mappings are sorted, we can use binary search to
- // find the best mapping.
-
- if (aNeedle[aLineName] <= 0) {
- throw new TypeError('Line must be greater than or equal to 1, got '
- + aNeedle[aLineName]);
- }
- if (aNeedle[aColumnName] < 0) {
- throw new TypeError('Column must be greater than or equal to 0, got '
- + aNeedle[aColumnName]);
- }
-
- return binarySearch.search(aNeedle, aMappings, aComparator, aBias);
- };
-
-/**
- * Compute the last column for each generated mapping. The last column is
- * inclusive.
- */
-BasicSourceMapConsumer.prototype.computeColumnSpans =
- function SourceMapConsumer_computeColumnSpans() {
- for (var index = 0; index < this._generatedMappings.length; ++index) {
- var mapping = this._generatedMappings[index];
-
- // Mappings do not contain a field for the last generated column. We
- // can come up with an optimistic estimate, however, by assuming that
- // mappings are contiguous (i.e. given two consecutive mappings, the
- // first mapping ends where the second one starts).
- if (index + 1 < this._generatedMappings.length) {
- var nextMapping = this._generatedMappings[index + 1];
-
- if (mapping.generatedLine === nextMapping.generatedLine) {
- mapping.lastGeneratedColumn = nextMapping.generatedColumn - 1;
- continue;
- }
- }
-
- // The last mapping for each line spans the entire line.
- mapping.lastGeneratedColumn = Infinity;
- }
- };
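-
-/*
- * Editor's sketch (not part of the original file): lastGeneratedColumn is
- * only populated once computeColumnSpans() has run, after which the
- * lastColumn field of allGeneratedPositionsFor results becomes meaningful.
- * The source name and line below are hypothetical.
- *
- *   consumer.computeColumnSpans();
- *   var spans = consumer.allGeneratedPositionsFor({ source: 'foo.js', line: 1 });
- *   // Each entry now carries a lastColumn value (Infinity for the last
- *   // mapping on a generated line).
- */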
-
-/**
- * Returns the original source, line, and column information for the generated
- * source's line and column positions provided. The only argument is an object
- * with the following properties:
- *
- * - line: The line number in the generated source. The line number
- * is 1-based.
- * - column: The column number in the generated source. The column
- * number is 0-based.
- * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or
- * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the
- * closest element that is smaller than or greater than the one we are
- * searching for, respectively, if the exact element cannot be found.
- * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'.
- *
- * and an object is returned with the following properties:
- *
- * - source: The original source file, or null.
- * - line: The line number in the original source, or null. The
- * line number is 1-based.
- * - column: The column number in the original source, or null. The
- * column number is 0-based.
- * - name: The original identifier, or null.
- */
-BasicSourceMapConsumer.prototype.originalPositionFor =
- function SourceMapConsumer_originalPositionFor(aArgs) {
- var needle = {
- generatedLine: util.getArg(aArgs, 'line'),
- generatedColumn: util.getArg(aArgs, 'column')
- };
-
- var index = this._findMapping(
- needle,
- this._generatedMappings,
- "generatedLine",
- "generatedColumn",
- util.compareByGeneratedPositionsDeflated,
- util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)
- );
-
- if (index >= 0) {
- var mapping = this._generatedMappings[index];
-
- if (mapping.generatedLine === needle.generatedLine) {
- var source = util.getArg(mapping, 'source', null);
- if (source !== null) {
- source = this._sources.at(source);
- source = util.computeSourceURL(this.sourceRoot, source, this._sourceMapURL);
- }
- var name = util.getArg(mapping, 'name', null);
- if (name !== null) {
- name = this._names.at(name);
- }
- return {
- source: source,
- line: util.getArg(mapping, 'originalLine', null),
- column: util.getArg(mapping, 'originalColumn', null),
- name: name
- };
- }
- }
-
- return {
- source: null,
- line: null,
- column: null,
- name: null
- };
- };
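-
-/*
- * Editor's sketch (not part of the original file): mapping a generated
- * position back to its original position, assuming `consumer` and
- * `SourceMapConsumer` are in scope as in the earlier sketches. The positions
- * and the result shown are hypothetical.
- *
- *   var pos = consumer.originalPositionFor({
- *     line: 1,
- *     column: 12,
- *     bias: SourceMapConsumer.LEAST_UPPER_BOUND
- *   });
- *   // e.g. { source: 'http://example.com/foo.js', line: 2, column: 4,
- *   //        name: null }
- */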
-
-/**
- * Return true if we have the source content for every source in the source
- * map, false otherwise.
- */
-BasicSourceMapConsumer.prototype.hasContentsOfAllSources =
- function BasicSourceMapConsumer_hasContentsOfAllSources() {
- if (!this.sourcesContent) {
- return false;
- }
- return this.sourcesContent.length >= this._sources.size() &&
- !this.sourcesContent.some(function (sc) { return sc == null; });
- };
-
-/**
- * Returns the original source content. The only argument is the url of the
- * original source file. Returns null if no original source content is
- * available.
- */
-BasicSourceMapConsumer.prototype.sourceContentFor =
- function SourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {
- if (!this.sourcesContent) {
- return null;
- }
-
- var index = this._findSourceIndex(aSource);
- if (index >= 0) {
- return this.sourcesContent[index];
- }
-
- var relativeSource = aSource;
- if (this.sourceRoot != null) {
- relativeSource = util.relative(this.sourceRoot, relativeSource);
- }
-
- var url;
- if (this.sourceRoot != null
- && (url = util.urlParse(this.sourceRoot))) {
- // XXX: file:// URIs and absolute paths lead to unexpected behavior for
- // many users. We can help them out when they expect file:// URIs to
- // behave like it would if they were running a local HTTP server. See
- // https://bugzilla.mozilla.org/show_bug.cgi?id=885597.
- var fileUriAbsPath = relativeSource.replace(/^file:\/\//, "");
- if (url.scheme == "file"
- && this._sources.has(fileUriAbsPath)) {
- return this.sourcesContent[this._sources.indexOf(fileUriAbsPath)]
- }
-
- if ((!url.path || url.path == "/")
- && this._sources.has("/" + relativeSource)) {
- return this.sourcesContent[this._sources.indexOf("/" + relativeSource)];
- }
- }
-
- // This function is used recursively from
- // IndexedSourceMapConsumer.prototype.sourceContentFor. In that case, we
- // don't want to throw if we can't find the source - we just want to
- // return null, so we provide a flag to exit gracefully.
- if (nullOnMissing) {
- return null;
- }
- else {
- throw new Error('"' + relativeSource + '" is not in the SourceMap.');
- }
- };
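-
-/*
- * Editor's sketch (not part of the original file): reading embedded source
- * contents. Passing true as the second argument returns null instead of
- * throwing when the source is not in the map. The file name is hypothetical.
- *
- *   if (consumer.hasContentsOfAllSources()) {
- *     var content = consumer.sourceContentFor(consumer.sources[0]);
- *   }
- *   var maybeContent = consumer.sourceContentFor('unknown.js', true); // null
- */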
-
-/**
- * Returns the generated line and column information for the original source,
- * line, and column positions provided. The only argument is an object with
- * the following properties:
- *
- * - source: The filename of the original source.
- * - line: The line number in the original source. The line number
- * is 1-based.
- * - column: The column number in the original source. The column
- * number is 0-based.
- * - bias: Either 'SourceMapConsumer.GREATEST_LOWER_BOUND' or
- * 'SourceMapConsumer.LEAST_UPPER_BOUND'. Specifies whether to return the
- * closest element that is smaller than or greater than the one we are
- * searching for, respectively, if the exact element cannot be found.
- * Defaults to 'SourceMapConsumer.GREATEST_LOWER_BOUND'.
- *
- * and an object is returned with the following properties:
- *
- * - line: The line number in the generated source, or null. The
- * line number is 1-based.
- * - column: The column number in the generated source, or null.
- * The column number is 0-based.
- */
-BasicSourceMapConsumer.prototype.generatedPositionFor =
- function SourceMapConsumer_generatedPositionFor(aArgs) {
- var source = util.getArg(aArgs, 'source');
- source = this._findSourceIndex(source);
- if (source < 0) {
- return {
- line: null,
- column: null,
- lastColumn: null
- };
- }
-
- var needle = {
- source: source,
- originalLine: util.getArg(aArgs, 'line'),
- originalColumn: util.getArg(aArgs, 'column')
- };
-
- var index = this._findMapping(
- needle,
- this._originalMappings,
- "originalLine",
- "originalColumn",
- util.compareByOriginalPositions,
- util.getArg(aArgs, 'bias', SourceMapConsumer.GREATEST_LOWER_BOUND)
- );
-
- if (index >= 0) {
- var mapping = this._originalMappings[index];
-
- if (mapping.source === needle.source) {
- return {
- line: util.getArg(mapping, 'generatedLine', null),
- column: util.getArg(mapping, 'generatedColumn', null),
- lastColumn: util.getArg(mapping, 'lastGeneratedColumn', null)
- };
- }
- }
-
- return {
- line: null,
- column: null,
- lastColumn: null
- };
- };
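-
-/*
- * Editor's sketch (not part of the original file): the inverse lookup of
- * originalPositionFor. The source name, position, and result are
- * hypothetical.
- *
- *   var generated = consumer.generatedPositionFor({
- *     source: 'foo.js',
- *     line: 2,
- *     column: 4
- *   });
- *   // e.g. { line: 1, column: 12, lastColumn: null }
- */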
-
-exports.BasicSourceMapConsumer = BasicSourceMapConsumer;
-
-/**
- * An IndexedSourceMapConsumer instance represents a parsed source map which
- * we can query for information. It differs from BasicSourceMapConsumer in
- * that it takes "indexed" source maps (i.e. ones with a "sections" field) as
- * input.
- *
- * The first parameter is a raw source map (either as a JSON string, or already
- * parsed to an object). According to the spec for indexed source maps, they
- * have the following attributes:
- *
- * - version: Which version of the source map spec this map is following.
- * - file: Optional. The generated file this source map is associated with.
- * - sections: A list of section definitions.
- *
- * Each value under the "sections" field has two fields:
- * - offset: The offset into the generated code at which this section
- * begins to apply, defined as an object with a "line" and "column"
- * field.
- * - map: A source map definition. This source map could also be indexed,
- * but doesn't have to be.
- *
- * Instead of the "map" field, it's also possible to have a "url" field
- * specifying a URL to retrieve a source map from, but that's currently
- * unsupported.
- *
- * Here's an example source map, taken from the source map spec[0], but
- * modified to omit a section which uses the "url" field.
- *
- * {
- * version : 3,
- * file: "app.js",
- * sections: [{
- * offset: {line:100, column:10},
- * map: {
- * version : 3,
- * file: "section.js",
- * sources: ["foo.js", "bar.js"],
- * names: ["src", "maps", "are", "fun"],
- * mappings: "AAAA,E;;ABCDE;"
- * }
- * }],
- * }
- *
- * The second parameter, if given, is a string whose value is the URL
- * at which the source map was found. This URL is used to compute the
- * sources array.
- *
- * [0]: https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.535es3xeprgt
- */
-function IndexedSourceMapConsumer(aSourceMap, aSourceMapURL) {
- var sourceMap = aSourceMap;
- if (typeof aSourceMap === 'string') {
- sourceMap = util.parseSourceMapInput(aSourceMap);
- }
-
- var version = util.getArg(sourceMap, 'version');
- var sections = util.getArg(sourceMap, 'sections');
-
- if (version != this._version) {
- throw new Error('Unsupported version: ' + version);
- }
-
- this._sources = new ArraySet();
- this._names = new ArraySet();
-
- var lastOffset = {
- line: -1,
- column: 0
- };
- this._sections = sections.map(function (s) {
- if (s.url) {
- // The url field will require support for asynchronicity.
- // See https://github.com/mozilla/source-map/issues/16
- throw new Error('Support for url field in sections not implemented.');
- }
- var offset = util.getArg(s, 'offset');
- var offsetLine = util.getArg(offset, 'line');
- var offsetColumn = util.getArg(offset, 'column');
-
- if (offsetLine < lastOffset.line ||
- (offsetLine === lastOffset.line && offsetColumn < lastOffset.column)) {
- throw new Error('Section offsets must be ordered and non-overlapping.');
- }
- lastOffset = offset;
-
- return {
- generatedOffset: {
- // The offset fields are 0-based, but we use 1-based indices when
- // encoding/decoding from VLQ.
- generatedLine: offsetLine + 1,
- generatedColumn: offsetColumn + 1
- },
- consumer: new SourceMapConsumer(util.getArg(s, 'map'), aSourceMapURL)
- }
- });
-}
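-
-/*
- * Editor's sketch (not part of the original file): an indexed map is consumed
- * through the same SourceMapConsumer entry point; the presence of a
- * "sections" field selects this class. `indexedRawMap` is a placeholder for a
- * map shaped like the example in the comment above.
- *
- *   var SourceMapConsumer = require('source-map').SourceMapConsumer;
- *   var consumer = new SourceMapConsumer(indexedRawMap);
- *   // consumer is an IndexedSourceMapConsumer; originalPositionFor,
- *   // generatedPositionFor, sourceContentFor, etc. behave as for basic maps.
- */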
-
-IndexedSourceMapConsumer.prototype = Object.create(SourceMapConsumer.prototype);
-IndexedSourceMapConsumer.prototype.constructor = SourceMapConsumer;
-
-/**
- * The version of the source mapping spec that we are consuming.
- */
-IndexedSourceMapConsumer.prototype._version = 3;
-
-/**
- * The list of original sources.
- */
-Object.defineProperty(IndexedSourceMapConsumer.prototype, 'sources', {
- get: function () {
- var sources = [];
- for (var i = 0; i < this._sections.length; i++) {
- for (var j = 0; j < this._sections[i].consumer.sources.length; j++) {
- sources.push(this._sections[i].consumer.sources[j]);
- }
- }
- return sources;
- }
-});
-
-/**
- * Returns the original source, line, and column information for the generated
- * source's line and column positions provided. The only argument is an object
- * with the following properties:
- *
- * - line: The line number in the generated source. The line number
- * is 1-based.
- * - column: The column number in the generated source. The column
- * number is 0-based.
- *
- * and an object is returned with the following properties:
- *
- * - source: The original source file, or null.
- * - line: The line number in the original source, or null. The
- * line number is 1-based.
- * - column: The column number in the original source, or null. The
- * column number is 0-based.
- * - name: The original identifier, or null.
- */
-IndexedSourceMapConsumer.prototype.originalPositionFor =
- function IndexedSourceMapConsumer_originalPositionFor(aArgs) {
- var needle = {
- generatedLine: util.getArg(aArgs, 'line'),
- generatedColumn: util.getArg(aArgs, 'column')
- };
-
- // Find the section containing the generated position we're trying to map
- // to an original position.
- var sectionIndex = binarySearch.search(needle, this._sections,
- function(needle, section) {
- var cmp = needle.generatedLine - section.generatedOffset.generatedLine;
- if (cmp) {
- return cmp;
- }
-
- return (needle.generatedColumn -
- section.generatedOffset.generatedColumn);
- });
- var section = this._sections[sectionIndex];
-
- if (!section) {
- return {
- source: null,
- line: null,
- column: null,
- name: null
- };
- }
-
- return section.consumer.originalPositionFor({
- line: needle.generatedLine -
- (section.generatedOffset.generatedLine - 1),
- column: needle.generatedColumn -
- (section.generatedOffset.generatedLine === needle.generatedLine
- ? section.generatedOffset.generatedColumn - 1
- : 0),
- bias: aArgs.bias
- });
- };
-
-/**
- * Return true if we have the source content for every source in the source
- * map, false otherwise.
- */
-IndexedSourceMapConsumer.prototype.hasContentsOfAllSources =
- function IndexedSourceMapConsumer_hasContentsOfAllSources() {
- return this._sections.every(function (s) {
- return s.consumer.hasContentsOfAllSources();
- });
- };
-
-/**
- * Returns the original source content. The only argument is the url of the
- * original source file. Returns null if no original source content is
- * available.
- */
-IndexedSourceMapConsumer.prototype.sourceContentFor =
- function IndexedSourceMapConsumer_sourceContentFor(aSource, nullOnMissing) {
- for (var i = 0; i < this._sections.length; i++) {
- var section = this._sections[i];
-
- var content = section.consumer.sourceContentFor(aSource, true);
- if (content) {
- return content;
- }
- }
- if (nullOnMissing) {
- return null;
- }
- else {
- throw new Error('"' + aSource + '" is not in the SourceMap.');
- }
- };
-
-/**
- * Returns the generated line and column information for the original source,
- * line, and column positions provided. The only argument is an object with
- * the following properties:
- *
- * - source: The filename of the original source.
- * - line: The line number in the original source. The line number
- * is 1-based.
- * - column: The column number in the original source. The column
- * number is 0-based.
- *
- * and an object is returned with the following properties:
- *
- * - line: The line number in the generated source, or null. The
- * line number is 1-based.
- * - column: The column number in the generated source, or null.
- * The column number is 0-based.
- */
-IndexedSourceMapConsumer.prototype.generatedPositionFor =
- function IndexedSourceMapConsumer_generatedPositionFor(aArgs) {
- for (var i = 0; i < this._sections.length; i++) {
- var section = this._sections[i];
-
- // Only consider this section if the requested source is in the list of
- // sources of the consumer.
- if (section.consumer._findSourceIndex(util.getArg(aArgs, 'source')) === -1) {
- continue;
- }
- var generatedPosition = section.consumer.generatedPositionFor(aArgs);
- if (generatedPosition) {
- var ret = {
- line: generatedPosition.line +
- (section.generatedOffset.generatedLine - 1),
- column: generatedPosition.column +
- (section.generatedOffset.generatedLine === generatedPosition.line
- ? section.generatedOffset.generatedColumn - 1
- : 0)
- };
- return ret;
- }
- }
-
- return {
- line: null,
- column: null
- };
- };
-
-/**
- * Parse the mappings in a string into a data structure which we can easily
- * query (the ordered arrays in the `this.__generatedMappings` and
- * `this.__originalMappings` properties).
- */
-IndexedSourceMapConsumer.prototype._parseMappings =
- function IndexedSourceMapConsumer_parseMappings(aStr, aSourceRoot) {
- this.__generatedMappings = [];
- this.__originalMappings = [];
- for (var i = 0; i < this._sections.length; i++) {
- var section = this._sections[i];
- var sectionMappings = section.consumer._generatedMappings;
- for (var j = 0; j < sectionMappings.length; j++) {
- var mapping = sectionMappings[j];
-
- var source = section.consumer._sources.at(mapping.source);
- source = util.computeSourceURL(section.consumer.sourceRoot, source, this._sourceMapURL);
- this._sources.add(source);
- source = this._sources.indexOf(source);
-
- var name = null;
- if (mapping.name) {
- name = section.consumer._names.at(mapping.name);
- this._names.add(name);
- name = this._names.indexOf(name);
- }
-
- // The mappings coming from the consumer for the section have
- // generated positions relative to the start of the section, so we
- // need to offset them to be relative to the start of the concatenated
- // generated file.
- var adjustedMapping = {
- source: source,
- generatedLine: mapping.generatedLine +
- (section.generatedOffset.generatedLine - 1),
- generatedColumn: mapping.generatedColumn +
- (section.generatedOffset.generatedLine === mapping.generatedLine
- ? section.generatedOffset.generatedColumn - 1
- : 0),
- originalLine: mapping.originalLine,
- originalColumn: mapping.originalColumn,
- name: name
- };
-
- this.__generatedMappings.push(adjustedMapping);
- if (typeof adjustedMapping.originalLine === 'number') {
- this.__originalMappings.push(adjustedMapping);
- }
- }
- }
-
- quickSort(this.__generatedMappings, util.compareByGeneratedPositionsDeflated);
- quickSort(this.__originalMappings, util.compareByOriginalPositions);
- };
-
-exports.IndexedSourceMapConsumer = IndexedSourceMapConsumer;
diff --git a/node_modules/source-map/lib/source-map-generator.js b/node_modules/source-map/lib/source-map-generator.js
deleted file mode 100644
index 508bcfbbc936da21673108f7ace72a0af6081664..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/source-map-generator.js
+++ /dev/null
@@ -1,425 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-var base64VLQ = require('./base64-vlq');
-var util = require('./util');
-var ArraySet = require('./array-set').ArraySet;
-var MappingList = require('./mapping-list').MappingList;
-
-/**
- * An instance of the SourceMapGenerator represents a source map which is
- * being built incrementally. You may pass an object with the following
- * properties:
- *
- * - file: The filename of the generated source.
- * - sourceRoot: A root for all relative URLs in this source map.
- */
-function SourceMapGenerator(aArgs) {
- if (!aArgs) {
- aArgs = {};
- }
- this._file = util.getArg(aArgs, 'file', null);
- this._sourceRoot = util.getArg(aArgs, 'sourceRoot', null);
- this._skipValidation = util.getArg(aArgs, 'skipValidation', false);
- this._sources = new ArraySet();
- this._names = new ArraySet();
- this._mappings = new MappingList();
- this._sourcesContents = null;
-}
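-
-/*
- * Editor's sketch (not part of the original file): creating a generator. The
- * file and sourceRoot values are hypothetical.
- *
- *   var SourceMapGenerator = require('source-map').SourceMapGenerator;
- *   var generator = new SourceMapGenerator({
- *     file: 'bundle.js',
- *     sourceRoot: 'http://example.com/src'
- *   });
- */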
-
-SourceMapGenerator.prototype._version = 3;
-
-/**
- * Creates a new SourceMapGenerator based on a SourceMapConsumer
- *
- * @param aSourceMapConsumer The SourceMap.
- */
-SourceMapGenerator.fromSourceMap =
- function SourceMapGenerator_fromSourceMap(aSourceMapConsumer) {
- var sourceRoot = aSourceMapConsumer.sourceRoot;
- var generator = new SourceMapGenerator({
- file: aSourceMapConsumer.file,
- sourceRoot: sourceRoot
- });
- aSourceMapConsumer.eachMapping(function (mapping) {
- var newMapping = {
- generated: {
- line: mapping.generatedLine,
- column: mapping.generatedColumn
- }
- };
-
- if (mapping.source != null) {
- newMapping.source = mapping.source;
- if (sourceRoot != null) {
- newMapping.source = util.relative(sourceRoot, newMapping.source);
- }
-
- newMapping.original = {
- line: mapping.originalLine,
- column: mapping.originalColumn
- };
-
- if (mapping.name != null) {
- newMapping.name = mapping.name;
- }
- }
-
- generator.addMapping(newMapping);
- });
- aSourceMapConsumer.sources.forEach(function (sourceFile) {
- var sourceRelative = sourceFile;
- if (sourceRoot !== null) {
- sourceRelative = util.relative(sourceRoot, sourceFile);
- }
-
- if (!generator._sources.has(sourceRelative)) {
- generator._sources.add(sourceRelative);
- }
-
- var content = aSourceMapConsumer.sourceContentFor(sourceFile);
- if (content != null) {
- generator.setSourceContent(sourceFile, content);
- }
- });
- return generator;
- };
-
-/**
- * Add a single mapping from original source line and column to the generated
- * source's line and column for this source map being created. The mapping
- * object should have the following properties:
- *
- * - generated: An object with the generated line and column positions.
- * - original: An object with the original line and column positions.
- * - source: The original source file (relative to the sourceRoot).
- * - name: An optional original token name for this mapping.
- */
-SourceMapGenerator.prototype.addMapping =
- function SourceMapGenerator_addMapping(aArgs) {
- var generated = util.getArg(aArgs, 'generated');
- var original = util.getArg(aArgs, 'original', null);
- var source = util.getArg(aArgs, 'source', null);
- var name = util.getArg(aArgs, 'name', null);
-
- if (!this._skipValidation) {
- this._validateMapping(generated, original, source, name);
- }
-
- if (source != null) {
- source = String(source);
- if (!this._sources.has(source)) {
- this._sources.add(source);
- }
- }
-
- if (name != null) {
- name = String(name);
- if (!this._names.has(name)) {
- this._names.add(name);
- }
- }
-
- this._mappings.add({
- generatedLine: generated.line,
- generatedColumn: generated.column,
- originalLine: original != null && original.line,
- originalColumn: original != null && original.column,
- source: source,
- name: name
- });
- };
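-
-/*
- * Editor's sketch (not part of the original file): the three levels of
- * mapping data accepted by addMapping (see _validateMapping below). All
- * positions, sources, and names are hypothetical.
- *
- *   // 1. Generated position only.
- *   generator.addMapping({ generated: { line: 10, column: 0 } });
- *
- *   // 2. Generated position, original position, and source.
- *   generator.addMapping({
- *     generated: { line: 10, column: 4 },
- *     original: { line: 3, column: 2 },
- *     source: 'foo.js'
- *   });
- *
- *   // 3. As above, plus an original identifier name.
- *   generator.addMapping({
- *     generated: { line: 10, column: 10 },
- *     original: { line: 3, column: 8 },
- *     source: 'foo.js',
- *     name: 'myFunction'
- *   });
- */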
-
-/**
- * Set the source content for a source file.
- */
-SourceMapGenerator.prototype.setSourceContent =
- function SourceMapGenerator_setSourceContent(aSourceFile, aSourceContent) {
- var source = aSourceFile;
- if (this._sourceRoot != null) {
- source = util.relative(this._sourceRoot, source);
- }
-
- if (aSourceContent != null) {
- // Add the source content to the _sourcesContents map.
- // Create a new _sourcesContents map if the property is null.
- if (!this._sourcesContents) {
- this._sourcesContents = Object.create(null);
- }
- this._sourcesContents[util.toSetString(source)] = aSourceContent;
- } else if (this._sourcesContents) {
- // Remove the source file from the _sourcesContents map.
- // If the _sourcesContents map is empty, set the property to null.
- delete this._sourcesContents[util.toSetString(source)];
- if (Object.keys(this._sourcesContents).length === 0) {
- this._sourcesContents = null;
- }
- }
- };
-
-/**
- * Applies the mappings of a sub-source-map for a specific source file to the
- * source map being generated. Each mapping to the supplied source file is
- * rewritten using the supplied source map. Note: The resolution for the
- * resulting mappings is the minimum of this map and the supplied map.
- *
- * @param aSourceMapConsumer The source map to be applied.
- * @param aSourceFile Optional. The filename of the source file.
- * If omitted, SourceMapConsumer's file property will be used.
- * @param aSourceMapPath Optional. The dirname of the path to the source map
- * to be applied. If relative, it is relative to the SourceMapConsumer.
- * This parameter is needed when the two source maps aren't in the same
- * directory, and the source map to be applied contains relative source
- * paths. If so, those relative source paths need to be rewritten
- * relative to the SourceMapGenerator.
- */
-SourceMapGenerator.prototype.applySourceMap =
- function SourceMapGenerator_applySourceMap(aSourceMapConsumer, aSourceFile, aSourceMapPath) {
- var sourceFile = aSourceFile;
- // If aSourceFile is omitted, we will use the file property of the SourceMap
- if (aSourceFile == null) {
- if (aSourceMapConsumer.file == null) {
- throw new Error(
- 'SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, ' +
- 'or the source map\'s "file" property. Both were omitted.'
- );
- }
- sourceFile = aSourceMapConsumer.file;
- }
- var sourceRoot = this._sourceRoot;
- // Make "sourceFile" relative if an absolute URL is passed.
- if (sourceRoot != null) {
- sourceFile = util.relative(sourceRoot, sourceFile);
- }
- // Applying the SourceMap can add and remove items from the sources and
- // the names array.
- var newSources = new ArraySet();
- var newNames = new ArraySet();
-
- // Find mappings for the "sourceFile"
- this._mappings.unsortedForEach(function (mapping) {
- if (mapping.source === sourceFile && mapping.originalLine != null) {
- // Check if it can be mapped by the source map, then update the mapping.
- var original = aSourceMapConsumer.originalPositionFor({
- line: mapping.originalLine,
- column: mapping.originalColumn
- });
- if (original.source != null) {
- // Copy mapping
- mapping.source = original.source;
- if (aSourceMapPath != null) {
- mapping.source = util.join(aSourceMapPath, mapping.source)
- }
- if (sourceRoot != null) {
- mapping.source = util.relative(sourceRoot, mapping.source);
- }
- mapping.originalLine = original.line;
- mapping.originalColumn = original.column;
- if (original.name != null) {
- mapping.name = original.name;
- }
- }
- }
-
- var source = mapping.source;
- if (source != null && !newSources.has(source)) {
- newSources.add(source);
- }
-
- var name = mapping.name;
- if (name != null && !newNames.has(name)) {
- newNames.add(name);
- }
-
- }, this);
- this._sources = newSources;
- this._names = newNames;
-
- // Copy sourcesContents of applied map.
- aSourceMapConsumer.sources.forEach(function (sourceFile) {
- var content = aSourceMapConsumer.sourceContentFor(sourceFile);
- if (content != null) {
- if (aSourceMapPath != null) {
- sourceFile = util.join(aSourceMapPath, sourceFile);
- }
- if (sourceRoot != null) {
- sourceFile = util.relative(sourceRoot, sourceFile);
- }
- this.setSourceContent(sourceFile, content);
- }
- }, this);
- };
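-
-/*
- * Editor's sketch (not part of the original file): composing two maps, e.g.
- * a bundler's map with a compiler's map for one of the bundled files. The
- * generator, consumer, and file names are hypothetical.
- *
- *   // bundleGenerator currently maps bundle.js back to the compiled foo.js;
- *   // fooConsumer maps the compiled foo.js back to foo.ts.
- *   bundleGenerator.applySourceMap(fooConsumer, 'foo.js');
- *   // bundle.js positions now resolve all the way back to foo.ts.
- */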
-
-/**
- * A mapping can have one of the three levels of data:
- *
- * 1. Just the generated position.
- * 2. The Generated position, original position, and original source.
- * 3. Generated and original position, original source, as well as a name
- * token.
- *
- * To maintain consistency, we validate that any new mapping being added falls
- * in to one of these categories.
- */
-SourceMapGenerator.prototype._validateMapping =
- function SourceMapGenerator_validateMapping(aGenerated, aOriginal, aSource,
- aName) {
- // When aOriginal is truthy but has empty values for .line and .column,
- // it is most likely a programmer error. In this case we throw a very
- // specific error message to try to guide them the right way.
- // For example: https://github.com/Polymer/polymer-bundler/pull/519
- if (aOriginal && typeof aOriginal.line !== 'number' && typeof aOriginal.column !== 'number') {
- throw new Error(
- 'original.line and original.column are not numbers -- you probably meant to omit ' +
- 'the original mapping entirely and only map the generated position. If so, pass ' +
- 'null for the original mapping instead of an object with empty or null values.'
- );
- }
-
- if (aGenerated && 'line' in aGenerated && 'column' in aGenerated
- && aGenerated.line > 0 && aGenerated.column >= 0
- && !aOriginal && !aSource && !aName) {
- // Case 1.
- return;
- }
- else if (aGenerated && 'line' in aGenerated && 'column' in aGenerated
- && aOriginal && 'line' in aOriginal && 'column' in aOriginal
- && aGenerated.line > 0 && aGenerated.column >= 0
- && aOriginal.line > 0 && aOriginal.column >= 0
- && aSource) {
- // Cases 2 and 3.
- return;
- }
- else {
- throw new Error('Invalid mapping: ' + JSON.stringify({
- generated: aGenerated,
- source: aSource,
- original: aOriginal,
- name: aName
- }));
- }
- };
-
-/**
- * Serialize the accumulated mappings into the stream of base 64 VLQs
- * specified by the source map format.
- */
-SourceMapGenerator.prototype._serializeMappings =
- function SourceMapGenerator_serializeMappings() {
- var previousGeneratedColumn = 0;
- var previousGeneratedLine = 1;
- var previousOriginalColumn = 0;
- var previousOriginalLine = 0;
- var previousName = 0;
- var previousSource = 0;
- var result = '';
- var next;
- var mapping;
- var nameIdx;
- var sourceIdx;
-
- var mappings = this._mappings.toArray();
- for (var i = 0, len = mappings.length; i < len; i++) {
- mapping = mappings[i];
- next = ''
-
- if (mapping.generatedLine !== previousGeneratedLine) {
- previousGeneratedColumn = 0;
- while (mapping.generatedLine !== previousGeneratedLine) {
- next += ';';
- previousGeneratedLine++;
- }
- }
- else {
- if (i > 0) {
- if (!util.compareByGeneratedPositionsInflated(mapping, mappings[i - 1])) {
- continue;
- }
- next += ',';
- }
- }
-
- next += base64VLQ.encode(mapping.generatedColumn
- - previousGeneratedColumn);
- previousGeneratedColumn = mapping.generatedColumn;
-
- if (mapping.source != null) {
- sourceIdx = this._sources.indexOf(mapping.source);
- next += base64VLQ.encode(sourceIdx - previousSource);
- previousSource = sourceIdx;
-
- // lines are stored 0-based in SourceMap spec version 3
- next += base64VLQ.encode(mapping.originalLine - 1
- - previousOriginalLine);
- previousOriginalLine = mapping.originalLine - 1;
-
- next += base64VLQ.encode(mapping.originalColumn
- - previousOriginalColumn);
- previousOriginalColumn = mapping.originalColumn;
-
- if (mapping.name != null) {
- nameIdx = this._names.indexOf(mapping.name);
- next += base64VLQ.encode(nameIdx - previousName);
- previousName = nameIdx;
- }
- }
-
- result += next;
- }
-
- return result;
- };
-
-SourceMapGenerator.prototype._generateSourcesContent =
- function SourceMapGenerator_generateSourcesContent(aSources, aSourceRoot) {
- return aSources.map(function (source) {
- if (!this._sourcesContents) {
- return null;
- }
- if (aSourceRoot != null) {
- source = util.relative(aSourceRoot, source);
- }
- var key = util.toSetString(source);
- return Object.prototype.hasOwnProperty.call(this._sourcesContents, key)
- ? this._sourcesContents[key]
- : null;
- }, this);
- };
-
-/**
- * Externalize the source map.
- */
-SourceMapGenerator.prototype.toJSON =
- function SourceMapGenerator_toJSON() {
- var map = {
- version: this._version,
- sources: this._sources.toArray(),
- names: this._names.toArray(),
- mappings: this._serializeMappings()
- };
- if (this._file != null) {
- map.file = this._file;
- }
- if (this._sourceRoot != null) {
- map.sourceRoot = this._sourceRoot;
- }
- if (this._sourcesContents) {
- map.sourcesContent = this._generateSourcesContent(map.sources, map.sourceRoot);
- }
-
- return map;
- };
-
-/**
- * Render the source map being generated to a string.
- */
-SourceMapGenerator.prototype.toString =
- function SourceMapGenerator_toString() {
- return JSON.stringify(this.toJSON());
- };
-
-exports.SourceMapGenerator = SourceMapGenerator;
diff --git a/node_modules/source-map/lib/source-node.js b/node_modules/source-map/lib/source-node.js
deleted file mode 100644
index 8bcdbe385d2c0d333bcd62648ca700f13f9d3181..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/source-node.js
+++ /dev/null
@@ -1,413 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-var SourceMapGenerator = require('./source-map-generator').SourceMapGenerator;
-var util = require('./util');
-
-// Matches a Windows-style `\r\n` newline or a `\n` newline used by all other
-// operating systems these days (capturing the result).
-var REGEX_NEWLINE = /(\r?\n)/;
-
-// Newline character code for charCodeAt() comparisons
-var NEWLINE_CODE = 10;
-
-// Private symbol for identifying `SourceNode`s when multiple versions of
-// the source-map library are loaded. This MUST NOT CHANGE across
-// versions!
-var isSourceNode = "$$$isSourceNode$$$";
-
-/**
- * SourceNodes provide a way to abstract over interpolating/concatenating
- * snippets of generated JavaScript source code while maintaining the line and
- * column information associated with the original source code.
- *
- * @param aLine The original line number.
- * @param aColumn The original column number.
- * @param aSource The original source's filename.
- * @param aChunks Optional. An array of strings which are snippets of
- * generated JS, or other SourceNodes.
- * @param aName The original identifier.
- */
-function SourceNode(aLine, aColumn, aSource, aChunks, aName) {
- this.children = [];
- this.sourceContents = {};
- this.line = aLine == null ? null : aLine;
- this.column = aColumn == null ? null : aColumn;
- this.source = aSource == null ? null : aSource;
- this.name = aName == null ? null : aName;
- this[isSourceNode] = true;
- if (aChunks != null) this.add(aChunks);
-}
-
-/**
- * Creates a SourceNode from generated code and a SourceMapConsumer.
- *
- * @param aGeneratedCode The generated code
- * @param aSourceMapConsumer The SourceMap for the generated code
- * @param aRelativePath Optional. The path that relative sources in the
- * SourceMapConsumer should be relative to.
- */
-SourceNode.fromStringWithSourceMap =
- function SourceNode_fromStringWithSourceMap(aGeneratedCode, aSourceMapConsumer, aRelativePath) {
- // The SourceNode we want to fill with the generated code
- // and the SourceMap
- var node = new SourceNode();
-
- // All even indices of this array are one line of the generated code,
- // while all odd indices are the newlines between two adjacent lines
- // (since `REGEX_NEWLINE` captures its match).
- // Processed fragments are accessed by calling `shiftNextLine`.
- var remainingLines = aGeneratedCode.split(REGEX_NEWLINE);
- var remainingLinesIndex = 0;
- var shiftNextLine = function() {
- var lineContents = getNextLine();
- // The last line of a file might not have a newline.
- var newLine = getNextLine() || "";
- return lineContents + newLine;
-
- function getNextLine() {
- return remainingLinesIndex < remainingLines.length ?
- remainingLines[remainingLinesIndex++] : undefined;
- }
- };
-
- // We need to remember the position of "remainingLines"
- var lastGeneratedLine = 1, lastGeneratedColumn = 0;
-
- // To generate SourceNodes we need a code range.
- // To extract it, the current and the last mapping are used.
- // Here we store the last mapping.
- var lastMapping = null;
-
- aSourceMapConsumer.eachMapping(function (mapping) {
- if (lastMapping !== null) {
- // We add the code from "lastMapping" to "mapping":
- // First check if there is a new line in between.
- if (lastGeneratedLine < mapping.generatedLine) {
- // Associate first line with "lastMapping"
- addMappingWithCode(lastMapping, shiftNextLine());
- lastGeneratedLine++;
- lastGeneratedColumn = 0;
- // The remaining code is added without mapping
- } else {
- // There is no new line in between.
- // Associate the code between "lastGeneratedColumn" and
- // "mapping.generatedColumn" with "lastMapping"
- var nextLine = remainingLines[remainingLinesIndex] || '';
- var code = nextLine.substr(0, mapping.generatedColumn -
- lastGeneratedColumn);
- remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn -
- lastGeneratedColumn);
- lastGeneratedColumn = mapping.generatedColumn;
- addMappingWithCode(lastMapping, code);
- // No more remaining code, continue
- lastMapping = mapping;
- return;
- }
- }
- // We add the generated code until the first mapping
- // to the SourceNode without any mapping.
- // Each line is added as a separate string.
- while (lastGeneratedLine < mapping.generatedLine) {
- node.add(shiftNextLine());
- lastGeneratedLine++;
- }
- if (lastGeneratedColumn < mapping.generatedColumn) {
- var nextLine = remainingLines[remainingLinesIndex] || '';
- node.add(nextLine.substr(0, mapping.generatedColumn));
- remainingLines[remainingLinesIndex] = nextLine.substr(mapping.generatedColumn);
- lastGeneratedColumn = mapping.generatedColumn;
- }
- lastMapping = mapping;
- }, this);
- // We have processed all mappings.
- if (remainingLinesIndex < remainingLines.length) {
- if (lastMapping) {
- // Associate the remaining code in the current line with "lastMapping"
- addMappingWithCode(lastMapping, shiftNextLine());
- }
- // and add the remaining lines without any mapping
- node.add(remainingLines.splice(remainingLinesIndex).join(""));
- }
-
- // Copy sourcesContent into SourceNode
- aSourceMapConsumer.sources.forEach(function (sourceFile) {
- var content = aSourceMapConsumer.sourceContentFor(sourceFile);
- if (content != null) {
- if (aRelativePath != null) {
- sourceFile = util.join(aRelativePath, sourceFile);
- }
- node.setSourceContent(sourceFile, content);
- }
- });
-
- return node;
-
- function addMappingWithCode(mapping, code) {
- if (mapping === null || mapping.source === undefined) {
- node.add(code);
- } else {
- var source = aRelativePath
- ? util.join(aRelativePath, mapping.source)
- : mapping.source;
- node.add(new SourceNode(mapping.originalLine,
- mapping.originalColumn,
- source,
- code,
- mapping.name));
- }
- }
- };
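-
-/*
- * Editor's sketch (not part of the original file): rebuilding a SourceNode
- * tree from previously generated code plus its map, then appending more
- * code. `generatedCode` and `rawSourceMap` are hypothetical inputs.
- *
- *   var sourceMap = require('source-map');
- *   var consumer = new sourceMap.SourceMapConsumer(rawSourceMap);
- *   var node = sourceMap.SourceNode.fromStringWithSourceMap(generatedCode,
- *                                                           consumer);
- *   node.add('\n// appended banner\n');
- */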
-
-/**
- * Add a chunk of generated JS to this source node.
- *
- * @param aChunk A string snippet of generated JS code, another instance of
- * SourceNode, or an array where each member is one of those things.
- */
-SourceNode.prototype.add = function SourceNode_add(aChunk) {
- if (Array.isArray(aChunk)) {
- aChunk.forEach(function (chunk) {
- this.add(chunk);
- }, this);
- }
- else if (aChunk[isSourceNode] || typeof aChunk === "string") {
- if (aChunk) {
- this.children.push(aChunk);
- }
- }
- else {
- throw new TypeError(
- "Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk
- );
- }
- return this;
-};
-
-/**
- * Add a chunk of generated JS to the beginning of this source node.
- *
- * @param aChunk A string snippet of generated JS code, another instance of
- * SourceNode, or an array where each member is one of those things.
- */
-SourceNode.prototype.prepend = function SourceNode_prepend(aChunk) {
- if (Array.isArray(aChunk)) {
- for (var i = aChunk.length-1; i >= 0; i--) {
- this.prepend(aChunk[i]);
- }
- }
- else if (aChunk[isSourceNode] || typeof aChunk === "string") {
- this.children.unshift(aChunk);
- }
- else {
- throw new TypeError(
- "Expected a SourceNode, string, or an array of SourceNodes and strings. Got " + aChunk
- );
- }
- return this;
-};
-
-/**
- * Walk over the tree of JS snippets in this node and its children. The
- * walking function is called once for each snippet of JS and is passed that
- * snippet and its original associated source's line/column location.
- *
- * @param aFn The traversal function.
- */
-SourceNode.prototype.walk = function SourceNode_walk(aFn) {
- var chunk;
- for (var i = 0, len = this.children.length; i < len; i++) {
- chunk = this.children[i];
- if (chunk[isSourceNode]) {
- chunk.walk(aFn);
- }
- else {
- if (chunk !== '') {
- aFn(chunk, { source: this.source,
- line: this.line,
- column: this.column,
- name: this.name });
- }
- }
- }
-};
-
-/**
- * Like `String.prototype.join` except for SourceNodes. Inserts `aStr` between
- * each of `this.children`.
- *
- * @param aSep The separator.
- */
-SourceNode.prototype.join = function SourceNode_join(aSep) {
- var newChildren;
- var i;
- var len = this.children.length;
- if (len > 0) {
- newChildren = [];
- for (i = 0; i < len-1; i++) {
- newChildren.push(this.children[i]);
- newChildren.push(aSep);
- }
- newChildren.push(this.children[i]);
- this.children = newChildren;
- }
- return this;
-};
-
-/**
- * Call String.prototype.replace on the very right-most source snippet. Useful
- * for trimming whitespace from the end of a source node, etc.
- *
- * @param aPattern The pattern to replace.
- * @param aReplacement The thing to replace the pattern with.
- */
-SourceNode.prototype.replaceRight = function SourceNode_replaceRight(aPattern, aReplacement) {
- var lastChild = this.children[this.children.length - 1];
- if (lastChild[isSourceNode]) {
- lastChild.replaceRight(aPattern, aReplacement);
- }
- else if (typeof lastChild === 'string') {
- this.children[this.children.length - 1] = lastChild.replace(aPattern, aReplacement);
- }
- else {
- this.children.push(''.replace(aPattern, aReplacement));
- }
- return this;
-};
-
-/**
- * Set the source content for a source file. This will be added to the SourceMapGenerator
- * in the sourcesContent field.
- *
- * @param aSourceFile The filename of the source file
- * @param aSourceContent The content of the source file
- */
-SourceNode.prototype.setSourceContent =
- function SourceNode_setSourceContent(aSourceFile, aSourceContent) {
- this.sourceContents[util.toSetString(aSourceFile)] = aSourceContent;
- };
-
-/**
- * Walk over the tree of SourceNodes. The walking function is called for each
- * source file content and is passed the filename and source content.
- *
- * @param aFn The traversal function.
- */
-SourceNode.prototype.walkSourceContents =
- function SourceNode_walkSourceContents(aFn) {
- for (var i = 0, len = this.children.length; i < len; i++) {
- if (this.children[i][isSourceNode]) {
- this.children[i].walkSourceContents(aFn);
- }
- }
-
- var sources = Object.keys(this.sourceContents);
- for (var i = 0, len = sources.length; i < len; i++) {
- aFn(util.fromSetString(sources[i]), this.sourceContents[sources[i]]);
- }
- };
-
-/**
- * Return the string representation of this source node. Walks over the tree
- * and concatenates all the various snippets together to one string.
- */
-SourceNode.prototype.toString = function SourceNode_toString() {
- var str = "";
- this.walk(function (chunk) {
- str += chunk;
- });
- return str;
-};
-
-/**
- * Returns the string representation of this source node along with a source
- * map.
- */
-SourceNode.prototype.toStringWithSourceMap = function SourceNode_toStringWithSourceMap(aArgs) {
- var generated = {
- code: "",
- line: 1,
- column: 0
- };
- var map = new SourceMapGenerator(aArgs);
- var sourceMappingActive = false;
- var lastOriginalSource = null;
- var lastOriginalLine = null;
- var lastOriginalColumn = null;
- var lastOriginalName = null;
- this.walk(function (chunk, original) {
- generated.code += chunk;
- if (original.source !== null
- && original.line !== null
- && original.column !== null) {
- if(lastOriginalSource !== original.source
- || lastOriginalLine !== original.line
- || lastOriginalColumn !== original.column
- || lastOriginalName !== original.name) {
- map.addMapping({
- source: original.source,
- original: {
- line: original.line,
- column: original.column
- },
- generated: {
- line: generated.line,
- column: generated.column
- },
- name: original.name
- });
- }
- lastOriginalSource = original.source;
- lastOriginalLine = original.line;
- lastOriginalColumn = original.column;
- lastOriginalName = original.name;
- sourceMappingActive = true;
- } else if (sourceMappingActive) {
- map.addMapping({
- generated: {
- line: generated.line,
- column: generated.column
- }
- });
- lastOriginalSource = null;
- sourceMappingActive = false;
- }
- for (var idx = 0, length = chunk.length; idx < length; idx++) {
- if (chunk.charCodeAt(idx) === NEWLINE_CODE) {
- generated.line++;
- generated.column = 0;
- // Mappings end at eol
- if (idx + 1 === length) {
- lastOriginalSource = null;
- sourceMappingActive = false;
- } else if (sourceMappingActive) {
- map.addMapping({
- source: original.source,
- original: {
- line: original.line,
- column: original.column
- },
- generated: {
- line: generated.line,
- column: generated.column
- },
- name: original.name
- });
- }
- } else {
- generated.column++;
- }
- }
- });
- this.walkSourceContents(function (sourceFile, sourceContent) {
- map.setSourceContent(sourceFile, sourceContent);
- });
-
- return { code: generated.code, map: map };
-};
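-
-/*
- * Editor's sketch (not part of the original file): building a small tree of
- * SourceNodes by hand and emitting both code and map. All file names,
- * positions, and contents are hypothetical.
- *
- *   var SourceNode = require('source-map').SourceNode;
- *   var node = new SourceNode(null, null, null, [
- *     new SourceNode(1, 0, 'a.js', 'var a = 1;\n'),
- *     new SourceNode(1, 0, 'b.js', 'var b = 2;\n')
- *   ]);
- *   var result = node.toStringWithSourceMap({ file: 'out.js' });
- *   result.code;            // 'var a = 1;\nvar b = 2;\n'
- *   result.map.toString();  // JSON source map for out.js
- */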
-
-exports.SourceNode = SourceNode;
diff --git a/node_modules/source-map/lib/util.js b/node_modules/source-map/lib/util.js
deleted file mode 100644
index 3ca92e56f2a8cf086821031145ed60c53f8e9ab2..0000000000000000000000000000000000000000
--- a/node_modules/source-map/lib/util.js
+++ /dev/null
@@ -1,488 +0,0 @@
-/* -*- Mode: js; js-indent-level: 2; -*- */
-/*
- * Copyright 2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-
-/**
- * This is a helper function for getting values from parameter/options
- * objects.
- *
- * @param args The object we are extracting values from
- * @param name The name of the property we are getting.
- * @param defaultValue An optional value to return if the property is missing
- * from the object. If this is not specified and the property is missing, an
- * error will be thrown.
- */
-function getArg(aArgs, aName, aDefaultValue) {
- if (aName in aArgs) {
- return aArgs[aName];
- } else if (arguments.length === 3) {
- return aDefaultValue;
- } else {
- throw new Error('"' + aName + '" is a required argument.');
- }
-}
-exports.getArg = getArg;
-
-var urlRegexp = /^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/;
-var dataUrlRegexp = /^data:.+\,.+$/;
-
-function urlParse(aUrl) {
- var match = aUrl.match(urlRegexp);
- if (!match) {
- return null;
- }
- return {
- scheme: match[1],
- auth: match[2],
- host: match[3],
- port: match[4],
- path: match[5]
- };
-}
-exports.urlParse = urlParse;
-
-function urlGenerate(aParsedUrl) {
- var url = '';
- if (aParsedUrl.scheme) {
- url += aParsedUrl.scheme + ':';
- }
- url += '//';
- if (aParsedUrl.auth) {
- url += aParsedUrl.auth + '@';
- }
- if (aParsedUrl.host) {
- url += aParsedUrl.host;
- }
- if (aParsedUrl.port) {
- url += ":" + aParsedUrl.port
- }
- if (aParsedUrl.path) {
- url += aParsedUrl.path;
- }
- return url;
-}
-exports.urlGenerate = urlGenerate;
-
-/**
- * Normalizes a path, or the path portion of a URL:
- *
- * - Replaces consecutive slashes with one slash.
- * - Removes unnecessary '.' parts.
- * - Removes unnecessary '/..' parts.
- *
- * Based on code in the Node.js 'path' core module.
- *
- * @param aPath The path or url to normalize.
- */
-function normalize(aPath) {
- var path = aPath;
- var url = urlParse(aPath);
- if (url) {
- if (!url.path) {
- return aPath;
- }
- path = url.path;
- }
- var isAbsolute = exports.isAbsolute(path);
-
- var parts = path.split(/\/+/);
- for (var part, up = 0, i = parts.length - 1; i >= 0; i--) {
- part = parts[i];
- if (part === '.') {
- parts.splice(i, 1);
- } else if (part === '..') {
- up++;
- } else if (up > 0) {
- if (part === '') {
- // The first part is blank if the path is absolute. Trying to go
- // above the root is a no-op. Therefore we can remove all '..' parts
- // directly after the root.
- parts.splice(i + 1, up);
- up = 0;
- } else {
- parts.splice(i, 2);
- up--;
- }
- }
- }
- path = parts.join('/');
-
- if (path === '') {
- path = isAbsolute ? '/' : '.';
- }
-
- if (url) {
- url.path = path;
- return urlGenerate(url);
- }
- return path;
-}
-exports.normalize = normalize;
-
-/**
- * Joins two paths/URLs.
- *
- * @param aRoot The root path or URL.
- * @param aPath The path or URL to be joined with the root.
- *
- * - If aPath is a URL or a data URI, aPath is returned, unless aPath is a
- * scheme-relative URL: Then the scheme of aRoot, if any, is prepended
- * first.
- * - Otherwise aPath is a path. If aRoot is a URL, then its path portion
- * is updated with the result and aRoot is returned. Otherwise the result
- * is returned.
- * - If aPath is absolute, the result is aPath.
- * - Otherwise the two paths are joined with a slash.
- * - Joining for example 'http://' and 'www.example.com' is also supported.
- */
-function join(aRoot, aPath) {
- if (aRoot === "") {
- aRoot = ".";
- }
- if (aPath === "") {
- aPath = ".";
- }
- var aPathUrl = urlParse(aPath);
- var aRootUrl = urlParse(aRoot);
- if (aRootUrl) {
- aRoot = aRootUrl.path || '/';
- }
-
- // `join(foo, '//www.example.org')`
- if (aPathUrl && !aPathUrl.scheme) {
- if (aRootUrl) {
- aPathUrl.scheme = aRootUrl.scheme;
- }
- return urlGenerate(aPathUrl);
- }
-
- if (aPathUrl || aPath.match(dataUrlRegexp)) {
- return aPath;
- }
-
- // `join('http://', 'www.example.com')`
- if (aRootUrl && !aRootUrl.host && !aRootUrl.path) {
- aRootUrl.host = aPath;
- return urlGenerate(aRootUrl);
- }
-
- var joined = aPath.charAt(0) === '/'
- ? aPath
- : normalize(aRoot.replace(/\/+$/, '') + '/' + aPath);
-
- if (aRootUrl) {
- aRootUrl.path = joined;
- return urlGenerate(aRootUrl);
- }
- return joined;
-}
-exports.join = join;
-
-exports.isAbsolute = function (aPath) {
- return aPath.charAt(0) === '/' || urlRegexp.test(aPath);
-};
-
-/**
- * Make a path relative to a URL or another path.
- *
- * @param aRoot The root path or URL.
- * @param aPath The path or URL to be made relative to aRoot.
- */
-function relative(aRoot, aPath) {
- if (aRoot === "") {
- aRoot = ".";
- }
-
- aRoot = aRoot.replace(/\/$/, '');
-
- // It is possible for the path to be above the root. In this case, simply
- // checking whether the root is a prefix of the path won't work. Instead, we
- // need to remove components from the root one by one, until either we find
- // a prefix that fits, or we run out of components to remove.
- var level = 0;
- while (aPath.indexOf(aRoot + '/') !== 0) {
- var index = aRoot.lastIndexOf("/");
- if (index < 0) {
- return aPath;
- }
-
- // If the only part of the root that is left is the scheme (i.e. http://,
- // file:///, etc.), one or more slashes (/), or simply nothing at all, we
- // have exhausted all components, so the path is not relative to the root.
- aRoot = aRoot.slice(0, index);
- if (aRoot.match(/^([^\/]+:\/)?\/*$/)) {
- return aPath;
- }
-
- ++level;
- }
-
- // Make sure we add a "../" for each component we removed from the root.
- return Array(level + 1).join("../") + aPath.substr(aRoot.length + 1);
-}
-exports.relative = relative;
-
-var supportsNullProto = (function () {
- var obj = Object.create(null);
- return !('__proto__' in obj);
-}());
-
-function identity (s) {
- return s;
-}
-
-/**
- * Because behavior goes wacky when you set `__proto__` on objects, we
- * have to prefix all the strings in our set with an arbitrary character.
- *
- * See https://github.com/mozilla/source-map/pull/31 and
- * https://github.com/mozilla/source-map/issues/30
- *
- * @param String aStr
- */
-function toSetString(aStr) {
- if (isProtoString(aStr)) {
- return '$' + aStr;
- }
-
- return aStr;
-}
-exports.toSetString = supportsNullProto ? identity : toSetString;
-
-function fromSetString(aStr) {
- if (isProtoString(aStr)) {
- return aStr.slice(1);
- }
-
- return aStr;
-}
-exports.fromSetString = supportsNullProto ? identity : fromSetString;
-
-function isProtoString(s) {
- if (!s) {
- return false;
- }
-
- var length = s.length;
-
- if (length < 9 /* "__proto__".length */) {
- return false;
- }
-
- if (s.charCodeAt(length - 1) !== 95 /* '_' */ ||
- s.charCodeAt(length - 2) !== 95 /* '_' */ ||
- s.charCodeAt(length - 3) !== 111 /* 'o' */ ||
- s.charCodeAt(length - 4) !== 116 /* 't' */ ||
- s.charCodeAt(length - 5) !== 111 /* 'o' */ ||
- s.charCodeAt(length - 6) !== 114 /* 'r' */ ||
- s.charCodeAt(length - 7) !== 112 /* 'p' */ ||
- s.charCodeAt(length - 8) !== 95 /* '_' */ ||
- s.charCodeAt(length - 9) !== 95 /* '_' */) {
- return false;
- }
-
- for (var i = length - 10; i >= 0; i--) {
- if (s.charCodeAt(i) !== 36 /* '$' */) {
- return false;
- }
- }
-
- return true;
-}
-
-/**
- * Comparator between two mappings where the original positions are compared.
- *
- * Optionally pass in `true` as `onlyCompareOriginal` to consider two
- * mappings with the same original source/line/column, but different generated
- * line and column, the same. Useful when searching with a needle mapping
- * whose generated position is stubbed out.
- */
-function compareByOriginalPositions(mappingA, mappingB, onlyCompareOriginal) {
- var cmp = strcmp(mappingA.source, mappingB.source);
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.originalLine - mappingB.originalLine;
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.originalColumn - mappingB.originalColumn;
- if (cmp !== 0 || onlyCompareOriginal) {
- return cmp;
- }
-
- cmp = mappingA.generatedColumn - mappingB.generatedColumn;
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.generatedLine - mappingB.generatedLine;
- if (cmp !== 0) {
- return cmp;
- }
-
- return strcmp(mappingA.name, mappingB.name);
-}
-exports.compareByOriginalPositions = compareByOriginalPositions;
-
-/**
- * Comparator between two mappings with deflated source and name indices where
- * the generated positions are compared.
- *
- * Optionally pass in `true` as `onlyCompareGenerated` to consider two
- * mappings with the same generated line and column, but different
- * source/name/original line and column, the same. Useful when searching with
- * a needle mapping whose original position is stubbed out.
- */
-function compareByGeneratedPositionsDeflated(mappingA, mappingB, onlyCompareGenerated) {
- var cmp = mappingA.generatedLine - mappingB.generatedLine;
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.generatedColumn - mappingB.generatedColumn;
- if (cmp !== 0 || onlyCompareGenerated) {
- return cmp;
- }
-
- cmp = strcmp(mappingA.source, mappingB.source);
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.originalLine - mappingB.originalLine;
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.originalColumn - mappingB.originalColumn;
- if (cmp !== 0) {
- return cmp;
- }
-
- return strcmp(mappingA.name, mappingB.name);
-}
-exports.compareByGeneratedPositionsDeflated = compareByGeneratedPositionsDeflated;
-
-function strcmp(aStr1, aStr2) {
- if (aStr1 === aStr2) {
- return 0;
- }
-
- if (aStr1 === null) {
- return 1; // aStr2 !== null
- }
-
- if (aStr2 === null) {
- return -1; // aStr1 !== null
- }
-
- if (aStr1 > aStr2) {
- return 1;
- }
-
- return -1;
-}
-
-/**
- * Comparator between two mappings with inflated source and name strings where
- * the generated positions are compared.
- */
-function compareByGeneratedPositionsInflated(mappingA, mappingB) {
- var cmp = mappingA.generatedLine - mappingB.generatedLine;
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.generatedColumn - mappingB.generatedColumn;
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = strcmp(mappingA.source, mappingB.source);
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.originalLine - mappingB.originalLine;
- if (cmp !== 0) {
- return cmp;
- }
-
- cmp = mappingA.originalColumn - mappingB.originalColumn;
- if (cmp !== 0) {
- return cmp;
- }
-
- return strcmp(mappingA.name, mappingB.name);
-}
-exports.compareByGeneratedPositionsInflated = compareByGeneratedPositionsInflated;
-
-/**
- * Strip any JSON XSSI avoidance prefix from the string (as documented
- * in the source maps specification), and then parse the string as
- * JSON.
- */
-function parseSourceMapInput(str) {
- return JSON.parse(str.replace(/^\)]}'[^\n]*\n/, ''));
-}
-exports.parseSourceMapInput = parseSourceMapInput;
-
-/**
- * Compute the URL of a source given the source root, the source's
- * URL, and the source map's URL.
- */
-function computeSourceURL(sourceRoot, sourceURL, sourceMapURL) {
- sourceURL = sourceURL || '';
-
- if (sourceRoot) {
- // This follows what Chrome does.
- if (sourceRoot[sourceRoot.length - 1] !== '/' && sourceURL[0] !== '/') {
- sourceRoot += '/';
- }
- // The spec says:
- // Line 4: An optional source root, useful for relocating source
- // files on a server or removing repeated values in the
- // “sources” entry. This value is prepended to the individual
- // entries in the “source” field.
- sourceURL = sourceRoot + sourceURL;
- }
-
- // Historically, SourceMapConsumer did not take the sourceMapURL as
- // a parameter. This mode is still somewhat supported, which is why
- // this code block is conditional. However, it's preferable to pass
- // the source map URL to SourceMapConsumer, so that this function
- // can implement the source URL resolution algorithm as outlined in
- // the spec. This block is basically the equivalent of:
- // new URL(sourceURL, sourceMapURL).toString()
- // ... except it avoids using URL, which wasn't available in the
- // older releases of node still supported by this library.
- //
- // The spec says:
- // If the sources are not absolute URLs after prepending of the
- // “sourceRoot”, the sources are resolved relative to the
- // SourceMap (like resolving script src in a html document).
- if (sourceMapURL) {
- var parsed = urlParse(sourceMapURL);
- if (!parsed) {
- throw new Error("sourceMapURL could not be parsed");
- }
- if (parsed.path) {
- // Strip the last path component, but keep the "/".
- var index = parsed.path.lastIndexOf('/');
- if (index >= 0) {
- parsed.path = parsed.path.substring(0, index + 1);
- }
- }
- sourceURL = join(urlGenerate(parsed), sourceURL);
- }
-
- return normalize(sourceURL);
-}
-exports.computeSourceURL = computeSourceURL;
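
A few concrete calls make the path helpers above easier to follow. This is only a sketch: the package does publish `lib/` (see the `files` list in the package.json below), so the deep require works, but it is internal rather than documented API.

    var util = require('source-map/lib/util');

    util.getArg({ version: 3 }, 'version');          // 3 (throws if the key is missing and no default is given)
    util.normalize('/a/b//c/./../d');                // '/a/b/d'
    util.join('http://example.com/a/', 'b.js');      // 'http://example.com/a/b.js'
    util.relative('/the/root', '/the/root/one.js');  // 'one.js'

    // sourceRoot is prepended, then the result is resolved against the source map's URL.
    util.computeSourceURL('src', 'one.js', 'http://example.com/maps/bundle.js.map');
    // 'http://example.com/maps/src/one.js'
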
diff --git a/node_modules/source-map/package.json b/node_modules/source-map/package.json
deleted file mode 100644
index ac311324bfea0b8d84bcf522ff79aefbc783b3ea..0000000000000000000000000000000000000000
--- a/node_modules/source-map/package.json
+++ /dev/null
@@ -1,264 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "source-map",
- "raw": "source-map@^0.6.1",
- "rawSpec": "^0.6.1",
- "scope": null,
- "spec": ">=0.6.1 <0.7.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/concat-with-sourcemaps"
- ]
- ],
- "_from": "source-map@>=0.6.1 <0.7.0",
- "_id": "source-map@0.6.1",
- "_inCache": true,
- "_installable": true,
- "_location": "/source-map",
- "_nodeVersion": "8.4.0",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/source-map-0.6.1.tgz_1506696150821_0.6614652345888317"
- },
- "_npmUser": {
- "email": "tom@tromey.com",
- "name": "tromey"
- },
- "_npmVersion": "5.3.0",
- "_phantomChildren": {},
- "_requested": {
- "name": "source-map",
- "raw": "source-map@^0.6.1",
- "rawSpec": "^0.6.1",
- "scope": null,
- "spec": ">=0.6.1 <0.7.0",
- "type": "range"
- },
- "_requiredBy": [
- "/concat-with-sourcemaps"
- ],
- "_resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "_shasum": "74722af32e9614e9c287a8d0bbde48b5e2f1a263",
- "_shrinkwrap": null,
- "_spec": "source-map@^0.6.1",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/concat-with-sourcemaps",
- "author": {
- "email": "nfitzgerald@mozilla.com",
- "name": "Nick Fitzgerald"
- },
- "bugs": {
- "url": "https://github.com/mozilla/source-map/issues"
- },
- "contributors": [
- {
- "email": "tobias.koppers@googlemail.com",
- "name": "Tobias Koppers"
- },
- {
- "email": "duncan@dweebd.com",
- "name": "Duncan Beevers"
- },
- {
- "email": "scrane@mozilla.com",
- "name": "Stephen Crane"
- },
- {
- "email": "seddon.ryan@gmail.com",
- "name": "Ryan Seddon"
- },
- {
- "email": "miles.elam@deem.com",
- "name": "Miles Elam"
- },
- {
- "email": "mihai.bazon@gmail.com",
- "name": "Mihai Bazon"
- },
- {
- "email": "github.public.email@michael.ficarra.me",
- "name": "Michael Ficarra"
- },
- {
- "email": "todd@twolfson.com",
- "name": "Todd Wolfson"
- },
- {
- "email": "alexander@solovyov.net",
- "name": "Alexander Solovyov"
- },
- {
- "email": "fgnass@gmail.com",
- "name": "Felix Gnass"
- },
- {
- "email": "conrad.irwin@gmail.com",
- "name": "Conrad Irwin"
- },
- {
- "email": "usrbincc@yahoo.com",
- "name": "usrbincc"
- },
- {
- "email": "glasser@davidglasser.net",
- "name": "David Glasser"
- },
- {
- "email": "chase@newrelic.com",
- "name": "Chase Douglas"
- },
- {
- "email": "evan.exe@gmail.com",
- "name": "Evan Wallace"
- },
- {
- "email": "fayearthur@gmail.com",
- "name": "Heather Arthur"
- },
- {
- "email": "hughskennedy@gmail.com",
- "name": "Hugh Kennedy"
- },
- {
- "email": "glasser@davidglasser.net",
- "name": "David Glasser"
- },
- {
- "email": "simon.lydell@gmail.com",
- "name": "Simon Lydell"
- },
- {
- "email": "jellyes2@gmail.com",
- "name": "Jmeas Smith"
- },
- {
- "email": "mzgoddard@gmail.com",
- "name": "Michael Z Goddard"
- },
- {
- "email": "azu@users.noreply.github.com",
- "name": "azu"
- },
- {
- "email": "john@gozde.ca",
- "name": "John Gozde"
- },
- {
- "email": "akirkton@truefitinnovation.com",
- "name": "Adam Kirkton"
- },
- {
- "email": "christopher.montgomery@dowjones.com",
- "name": "Chris Montgomery"
- },
- {
- "email": "jryans@gmail.com",
- "name": "J. Ryan Stinnett"
- },
- {
- "email": "jherrington@walmartlabs.com",
- "name": "Jack Herrington"
- },
- {
- "email": "jeffpalentine@gmail.com",
- "name": "Chris Truter"
- },
- {
- "email": "daniel@danielespeset.com",
- "name": "Daniel Espeset"
- },
- {
- "email": "jamie.lf.wong@gmail.com",
- "name": "Jamie Wong"
- },
- {
- "email": "ejpbruel@mozilla.com",
- "name": "Eddy Bruël"
- },
- {
- "email": "hawkrives@gmail.com",
- "name": "Hawken Rives"
- },
- {
- "email": "giladp007@gmail.com",
- "name": "Gilad Peleg"
- },
- {
- "email": "djchie.dev@gmail.com",
- "name": "djchie"
- },
- {
- "email": "garysye@gmail.com",
- "name": "Gary Ye"
- },
- {
- "email": "nicolas.lalevee@hibnet.org",
- "name": "Nicolas Lalevée"
- }
- ],
- "dependencies": {},
- "description": "Generates and consumes source maps",
- "devDependencies": {
- "doctoc": "^0.15.0",
- "webpack": "^1.12.0"
- },
- "directories": {},
- "dist": {
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
- "shasum": "74722af32e9614e9c287a8d0bbde48b5e2f1a263",
- "tarball": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz"
- },
- "engines": {
- "node": ">=0.10.0"
- },
- "files": [
- "source-map.js",
- "source-map.d.ts",
- "lib/",
- "dist/source-map.debug.js",
- "dist/source-map.js",
- "dist/source-map.min.js",
- "dist/source-map.min.js.map"
- ],
- "gitHead": "ac518d2f21818146f3310557bd51c13d8cff2ba8",
- "homepage": "https://github.com/mozilla/source-map",
- "license": "BSD-3-Clause",
- "main": "./source-map.js",
- "maintainers": [
- {
- "email": "tom@tromey.com",
- "name": "tromey"
- },
- {
- "email": "ejpbruel@gmail.com",
- "name": "ejpbruel"
- },
- {
- "email": "nfitzgerald@mozilla.com",
- "name": "mozilla-devtools"
- },
- {
- "email": "dherman@mozilla.com",
- "name": "mozilla"
- },
- {
- "email": "fitzgen@gmail.com",
- "name": "nickfitzgerald"
- }
- ],
- "name": "source-map",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git+ssh://git@github.com/mozilla/source-map.git"
- },
- "scripts": {
- "build": "webpack --color",
- "test": "npm run build && node test/run-tests.js",
- "toc": "doctoc --title '## Table of Contents' README.md && doctoc --title '## Table of Contents' CONTRIBUTING.md"
- },
- "typings": "source-map",
- "version": "0.6.1"
-}
diff --git a/node_modules/source-map/source-map.d.ts b/node_modules/source-map/source-map.d.ts
deleted file mode 100644
index 8f972b0cfbf389901ed4392a635f91d5cf2bf0e0..0000000000000000000000000000000000000000
--- a/node_modules/source-map/source-map.d.ts
+++ /dev/null
@@ -1,98 +0,0 @@
-export interface StartOfSourceMap {
- file?: string;
- sourceRoot?: string;
-}
-
-export interface RawSourceMap extends StartOfSourceMap {
- version: string;
- sources: string[];
- names: string[];
- sourcesContent?: string[];
- mappings: string;
-}
-
-export interface Position {
- line: number;
- column: number;
-}
-
-export interface LineRange extends Position {
- lastColumn: number;
-}
-
-export interface FindPosition extends Position {
- // SourceMapConsumer.GREATEST_LOWER_BOUND or SourceMapConsumer.LEAST_UPPER_BOUND
- bias?: number;
-}
-
-export interface SourceFindPosition extends FindPosition {
- source: string;
-}
-
-export interface MappedPosition extends Position {
- source: string;
- name?: string;
-}
-
-export interface MappingItem {
- source: string;
- generatedLine: number;
- generatedColumn: number;
- originalLine: number;
- originalColumn: number;
- name: string;
-}
-
-export class SourceMapConsumer {
- static GENERATED_ORDER: number;
- static ORIGINAL_ORDER: number;
-
- static GREATEST_LOWER_BOUND: number;
- static LEAST_UPPER_BOUND: number;
-
- constructor(rawSourceMap: RawSourceMap);
- computeColumnSpans(): void;
- originalPositionFor(generatedPosition: FindPosition): MappedPosition;
- generatedPositionFor(originalPosition: SourceFindPosition): LineRange;
- allGeneratedPositionsFor(originalPosition: MappedPosition): Position[];
- hasContentsOfAllSources(): boolean;
- sourceContentFor(source: string, returnNullOnMissing?: boolean): string;
- eachMapping(callback: (mapping: MappingItem) => void, context?: any, order?: number): void;
-}
-
-export interface Mapping {
- generated: Position;
- original: Position;
- source: string;
- name?: string;
-}
-
-export class SourceMapGenerator {
- constructor(startOfSourceMap?: StartOfSourceMap);
- static fromSourceMap(sourceMapConsumer: SourceMapConsumer): SourceMapGenerator;
- addMapping(mapping: Mapping): void;
- setSourceContent(sourceFile: string, sourceContent: string): void;
- applySourceMap(sourceMapConsumer: SourceMapConsumer, sourceFile?: string, sourceMapPath?: string): void;
- toString(): string;
-}
-
-export interface CodeWithSourceMap {
- code: string;
- map: SourceMapGenerator;
-}
-
-export class SourceNode {
- constructor();
- constructor(line: number, column: number, source: string);
- constructor(line: number, column: number, source: string, chunk?: string, name?: string);
- static fromStringWithSourceMap(code: string, sourceMapConsumer: SourceMapConsumer, relativePath?: string): SourceNode;
- add(chunk: string): void;
- prepend(chunk: string): void;
- setSourceContent(sourceFile: string, sourceContent: string): void;
- walk(fn: (chunk: string, mapping: MappedPosition) => void): void;
- walkSourceContents(fn: (file: string, content: string) => void): void;
- join(sep: string): SourceNode;
- replaceRight(pattern: string, replacement: string): SourceNode;
- toString(): string;
- toStringWithSourceMap(startOfSourceMap?: StartOfSourceMap): CodeWithSourceMap;
-}
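
The typings above describe the 0.6.x API, in which `SourceMapConsumer` is constructed synchronously (the later 0.7.x line made construction asynchronous). A small consumer sketch with a hand-written map; note that the declaration types `version` as a string while real maps carry the number 3:

    var SourceMapConsumer = require('source-map').SourceMapConsumer;

    var consumer = new SourceMapConsumer({
      version: 3,
      file: 'min.js',
      sources: ['one.js'],
      names: [],
      mappings: 'AAAA',               // one segment: generated 1:0 -> one.js 1:0
      sourcesContent: ['var answer = 42;\n']
    });

    consumer.originalPositionFor({ line: 1, column: 0 });
    // { source: 'one.js', line: 1, column: 0, name: null }
    consumer.sourceContentFor('one.js');   // 'var answer = 42;\n'
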
diff --git a/node_modules/source-map/source-map.js b/node_modules/source-map/source-map.js
deleted file mode 100644
index bc88fe820c87a217d27eb010281fe39b71163835..0000000000000000000000000000000000000000
--- a/node_modules/source-map/source-map.js
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
- * Copyright 2009-2011 Mozilla Foundation and contributors
- * Licensed under the New BSD license. See LICENSE.txt or:
- * http://opensource.org/licenses/BSD-3-Clause
- */
-exports.SourceMapGenerator = require('./lib/source-map-generator').SourceMapGenerator;
-exports.SourceMapConsumer = require('./lib/source-map-consumer').SourceMapConsumer;
-exports.SourceNode = require('./lib/source-node').SourceNode;
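
These three exports are the package's whole public surface. Complementing the consumer sketch earlier, the generator side in brief (file names are placeholders):

    var SourceMapGenerator = require('source-map').SourceMapGenerator;

    var generator = new SourceMapGenerator({ file: 'out.js' });
    generator.addMapping({
      source: 'in.js',
      original:  { line: 1, column: 0 },
      generated: { line: 1, column: 0 }
    });
    generator.setSourceContent('in.js', 'answer();\n');

    JSON.parse(generator.toString()).mappings;   // 'AAAA' -- the single mapping, VLQ-encoded
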
diff --git a/node_modules/sprintf-js/.npmignore b/node_modules/sprintf-js/.npmignore
deleted file mode 100644
index 096746c1480d8f93be244a3d71f0594b072092d1..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/.npmignore
+++ /dev/null
@@ -1 +0,0 @@
-/node_modules/
\ No newline at end of file
diff --git a/node_modules/sprintf-js/LICENSE b/node_modules/sprintf-js/LICENSE
deleted file mode 100644
index 663ac52e4d8cd9eaf2a25080f3d0545f798b877c..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2007-2014, Alexandru Marasteanu
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-* Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-* Neither the name of this software nor the names of its contributors may be
- used to endorse or promote products derived from this software without
- specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/node_modules/sprintf-js/README.md b/node_modules/sprintf-js/README.md
deleted file mode 100644
index 83863561b29199c421df29daa9e798b3b940f246..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/README.md
+++ /dev/null
@@ -1,88 +0,0 @@
-# sprintf.js
-**sprintf.js** is a complete open source JavaScript sprintf implementation for the *browser* and *node.js*.
-
-Its prototype is simple:
-
- string sprintf(string format, [mixed arg1 [, mixed arg2 [, ...]]])
-
-The placeholders in the format string are marked by `%` and are followed by one or more of these elements, in this order:
-
-* An optional number followed by a `$` sign that selects which argument index to use for the value. If not specified, arguments will be placed in the same order as the placeholders in the input string.
-* An optional `+` sign that forces the result to be preceded by a plus or minus sign on numeric values. By default, only the `-` sign is used on negative numbers.
-* An optional padding specifier that says what character to use for padding (if specified). Possible values are `0` or any other character preceded by a `'` (single quote). The default is to pad with *spaces*.
-* An optional `-` sign that causes sprintf to left-align the result of this placeholder. The default is to right-align the result.
-* An optional number that says how many characters the result should have. If the value to be returned is shorter than this number, the result will be padded. When used with the `j` (JSON) type specifier, the padding length specifies the tab size used for indentation.
-* An optional precision modifier, consisting of a `.` (dot) followed by a number, that says how many digits should be displayed for floating point numbers. When used with the `g` type specifier, it specifies the number of significant digits. When used on a string, it causes the result to be truncated.
-* A type specifier that can be any of:
- * `%` — yields a literal `%` character
- * `b` — yields an integer as a binary number
- * `c` — yields an integer as the character with that ASCII value
- * `d` or `i` — yields an integer as a signed decimal number
- * `e` — yields a float using scientific notation
- * `u` — yields an integer as an unsigned decimal number
- * `f` — yields a float as is; see notes on precision above
- * `g` — yields a float as is; see notes on precision above
- * `o` — yields an integer as an octal number
- * `s` — yields a string as is
- * `x` — yields an integer as a hexadecimal number (lower-case)
- * `X` — yields an integer as a hexadecimal number (upper-case)
- * `j` — yields a JavaScript object or array as a JSON encoded string
-
-## JavaScript `vsprintf`
-`vsprintf` is the same as `sprintf` except that it accepts an array of arguments, rather than a variable number of arguments:
-
- vsprintf("The first 4 letters of the english alphabet are: %s, %s, %s and %s", ["a", "b", "c", "d"])
-
-## Argument swapping
-You can also swap the arguments. That is, the order of the placeholders doesn't have to match the order of the arguments. You can do that by simply indicating in the format string which arguments the placeholders refer to:
-
- sprintf("%2$s %3$s a %1$s", "cracker", "Polly", "wants")
-And, of course, you can repeat the placeholders without having to increase the number of arguments.
-
-## Named arguments
-Format strings may contain replacement fields rather than positional placeholders. Instead of referring to a certain argument, you can refer to a certain key within an object. Replacement fields are surrounded by parentheses, `(` and `)`, and begin with a keyword that refers to a key:
-
- var user = {
- name: "Dolly"
- }
- sprintf("Hello %(name)s", user) // Hello Dolly
-Keywords in replacement fields can be optionally followed by any number of keywords or indexes:
-
- var users = [
- {name: "Dolly"},
- {name: "Molly"},
- {name: "Polly"}
- ]
- sprintf("Hello %(users[0].name)s, %(users[1].name)s and %(users[2].name)s", {users: users}) // Hello Dolly, Molly and Polly
-Note: mixing positional and named placeholders is not (yet) supported
-
-## Computed values
-You can pass in a function as a dynamic value and it will be invoked (with no arguments) in order to compute the value on-the-fly.
-
- sprintf("Current timestamp: %d", Date.now) // Current timestamp: 1398005382890
- sprintf("Current date and time: %s", function() { return new Date().toString() })
-
-# AngularJS
-You can now use `sprintf` and `vsprintf` (also aliased as `fmt` and `vfmt` respectively) in your AngularJS projects. See `demo/`.
-
-# Installation
-
-## Via Bower
-
- bower install sprintf
-
-## Or as a node.js module
-
- npm install sprintf-js
-
-### Usage
-
- var sprintf = require("sprintf-js").sprintf,
- vsprintf = require("sprintf-js").vsprintf
-
- sprintf("%2$s %3$s a %1$s", "cracker", "Polly", "wants")
- vsprintf("The first 4 letters of the english alphabet are: %s, %s, %s and %s", ["a", "b", "c", "d"])
-
-# License
-
-**sprintf.js** is licensed under the terms of the 3-clause BSD license.
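
Putting the placeholder grammar documented above together, a few combined examples (sign, custom padding, width, precision, alignment and named arguments in one place):

    var sprintf = require('sprintf-js').sprintf;

    sprintf("%+'_10.2f", 3.14159);   // '_____+3.14'  (forced sign, pad with _, width 10, 2 decimals)
    sprintf("%-8s|", "left");        // 'left    |'   (left-aligned in a field of width 8)
    sprintf("%(who)s scored %(points)d", { who: "Polly", points: 7 });   // 'Polly scored 7'
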
diff --git a/node_modules/sprintf-js/bower.json b/node_modules/sprintf-js/bower.json
deleted file mode 100644
index d90a75989f7b056e912c5d3dc87a9bf858ecf5d6..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/bower.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "name": "sprintf",
- "description": "JavaScript sprintf implementation",
- "version": "1.0.3",
- "main": "src/sprintf.js",
- "license": "BSD-3-Clause-Clear",
- "keywords": ["sprintf", "string", "formatting"],
- "authors": ["Alexandru Marasteanu (http://alexei.ro/)"],
- "homepage": "https://github.com/alexei/sprintf.js",
- "repository": {
- "type": "git",
- "url": "git://github.com/alexei/sprintf.js.git"
- }
-}
diff --git a/node_modules/sprintf-js/demo/angular.html b/node_modules/sprintf-js/demo/angular.html
deleted file mode 100644
index 3559efd7635634a52746f717186318787bf4fa72..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/demo/angular.html
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-
-
-
-
-
-
- {{ "%+010d"|sprintf:-123 }}
- {{ "%+010d"|vsprintf:[-123] }}
- {{ "%+010d"|fmt:-123 }}
- {{ "%+010d"|vfmt:[-123] }}
- {{ "I've got %2$d apples and %1$d oranges."|fmt:4:2 }}
- {{ "I've got %(apples)d apples and %(oranges)d oranges."|fmt:{apples: 2, oranges: 4} }}
-
-
-
-
diff --git a/node_modules/sprintf-js/dist/angular-sprintf.min.js b/node_modules/sprintf-js/dist/angular-sprintf.min.js
deleted file mode 100644
index dbaf744d83c21455f271676cd9ec0e64a0cc1ec5..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/dist/angular-sprintf.min.js
+++ /dev/null
@@ -1,4 +0,0 @@
-/*! sprintf-js | Alexandru Marasteanu (http://alexei.ro/) | BSD-3-Clause */
-
-angular.module("sprintf",[]).filter("sprintf",function(){return function(){return sprintf.apply(null,arguments)}}).filter("fmt",["$filter",function(a){return a("sprintf")}]).filter("vsprintf",function(){return function(a,b){return vsprintf(a,b)}}).filter("vfmt",["$filter",function(a){return a("vsprintf")}]);
-//# sourceMappingURL=angular-sprintf.min.map
\ No newline at end of file
diff --git a/node_modules/sprintf-js/dist/angular-sprintf.min.js.map b/node_modules/sprintf-js/dist/angular-sprintf.min.js.map
deleted file mode 100644
index 055964c624c1aca1629d60f3ab8a0b5e02a8e798..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/dist/angular-sprintf.min.js.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"angular-sprintf.min.js","sources":["../src/angular-sprintf.js"],"names":["angular","module","filter","sprintf","apply","arguments","$filter","format","argv","vsprintf"],"mappings":";;AAAAA,QACIC,OAAO,cACPC,OAAO,UAAW,WACd,MAAO,YACH,MAAOC,SAAQC,MAAM,KAAMC,cAGnCH,OAAO,OAAQ,UAAW,SAASI,GAC/B,MAAOA,GAAQ,cAEnBJ,OAAO,WAAY,WACf,MAAO,UAASK,EAAQC,GACpB,MAAOC,UAASF,EAAQC,MAGhCN,OAAO,QAAS,UAAW,SAASI,GAChC,MAAOA,GAAQ"}
\ No newline at end of file
diff --git a/node_modules/sprintf-js/dist/angular-sprintf.min.map b/node_modules/sprintf-js/dist/angular-sprintf.min.map
deleted file mode 100644
index 055964c624c1aca1629d60f3ab8a0b5e02a8e798..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/dist/angular-sprintf.min.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"angular-sprintf.min.js","sources":["../src/angular-sprintf.js"],"names":["angular","module","filter","sprintf","apply","arguments","$filter","format","argv","vsprintf"],"mappings":";;AAAAA,QACIC,OAAO,cACPC,OAAO,UAAW,WACd,MAAO,YACH,MAAOC,SAAQC,MAAM,KAAMC,cAGnCH,OAAO,OAAQ,UAAW,SAASI,GAC/B,MAAOA,GAAQ,cAEnBJ,OAAO,WAAY,WACf,MAAO,UAASK,EAAQC,GACpB,MAAOC,UAASF,EAAQC,MAGhCN,OAAO,QAAS,UAAW,SAASI,GAChC,MAAOA,GAAQ"}
\ No newline at end of file
diff --git a/node_modules/sprintf-js/dist/sprintf.min.js b/node_modules/sprintf-js/dist/sprintf.min.js
deleted file mode 100644
index dc61e51add29bb9904898831d4bcc44acc6267a9..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/dist/sprintf.min.js
+++ /dev/null
@@ -1,4 +0,0 @@
-/*! sprintf-js | Alexandru Marasteanu (http://alexei.ro/) | BSD-3-Clause */
-
-!function(a){function b(){var a=arguments[0],c=b.cache;return c[a]&&c.hasOwnProperty(a)||(c[a]=b.parse(a)),b.format.call(null,c[a],arguments)}function c(a){return Object.prototype.toString.call(a).slice(8,-1).toLowerCase()}function d(a,b){return Array(b+1).join(a)}var e={not_string:/[^s]/,number:/[diefg]/,json:/[j]/,not_json:/[^j]/,text:/^[^\x25]+/,modulo:/^\x25{2}/,placeholder:/^\x25(?:([1-9]\d*)\$|\(([^\)]+)\))?(\+)?(0|'[^$])?(-)?(\d+)?(?:\.(\d+))?([b-gijosuxX])/,key:/^([a-z_][a-z_\d]*)/i,key_access:/^\.([a-z_][a-z_\d]*)/i,index_access:/^\[(\d+)\]/,sign:/^[\+\-]/};b.format=function(a,f){var g,h,i,j,k,l,m,n=1,o=a.length,p="",q=[],r=!0,s="";for(h=0;o>h;h++)if(p=c(a[h]),"string"===p)q[q.length]=a[h];else if("array"===p){if(j=a[h],j[2])for(g=f[n],i=0;i=0),j[8]){case"b":g=g.toString(2);break;case"c":g=String.fromCharCode(g);break;case"d":case"i":g=parseInt(g,10);break;case"j":g=JSON.stringify(g,null,j[6]?parseInt(j[6]):0);break;case"e":g=j[7]?g.toExponential(j[7]):g.toExponential();break;case"f":g=j[7]?parseFloat(g).toFixed(j[7]):parseFloat(g);break;case"g":g=j[7]?parseFloat(g).toPrecision(j[7]):parseFloat(g);break;case"o":g=g.toString(8);break;case"s":g=(g=String(g))&&j[7]?g.substring(0,j[7]):g;break;case"u":g>>>=0;break;case"x":g=g.toString(16);break;case"X":g=g.toString(16).toUpperCase()}e.json.test(j[8])?q[q.length]=g:(!e.number.test(j[8])||r&&!j[3]?s="":(s=r?"+":"-",g=g.toString().replace(e.sign,"")),l=j[4]?"0"===j[4]?"0":j[4].charAt(1):" ",m=j[6]-(s+g).length,k=j[6]&&m>0?d(l,m):"",q[q.length]=j[5]?s+g+k:"0"===l?s+k+g:k+s+g)}return q.join("")},b.cache={},b.parse=function(a){for(var b=a,c=[],d=[],f=0;b;){if(null!==(c=e.text.exec(b)))d[d.length]=c[0];else if(null!==(c=e.modulo.exec(b)))d[d.length]="%";else{if(null===(c=e.placeholder.exec(b)))throw new SyntaxError("[sprintf] unexpected placeholder");if(c[2]){f|=1;var g=[],h=c[2],i=[];if(null===(i=e.key.exec(h)))throw new SyntaxError("[sprintf] failed to parse named argument key");for(g[g.length]=i[1];""!==(h=h.substring(i[0].length));)if(null!==(i=e.key_access.exec(h)))g[g.length]=i[1];else{if(null===(i=e.index_access.exec(h)))throw new SyntaxError("[sprintf] failed to parse named argument key");g[g.length]=i[1]}c[2]=g}else f|=2;if(3===f)throw new Error("[sprintf] mixing positional and named placeholders is not (yet) supported");d[d.length]=c}b=b.substring(c[0].length)}return d};var f=function(a,c,d){return d=(c||[]).slice(0),d.splice(0,0,a),b.apply(null,d)};"undefined"!=typeof exports?(exports.sprintf=b,exports.vsprintf=f):(a.sprintf=b,a.vsprintf=f,"function"==typeof define&&define.amd&&define(function(){return{sprintf:b,vsprintf:f}}))}("undefined"==typeof window?this:window);
-//# sourceMappingURL=sprintf.min.map
\ No newline at end of file
diff --git a/node_modules/sprintf-js/dist/sprintf.min.js.map b/node_modules/sprintf-js/dist/sprintf.min.js.map
deleted file mode 100644
index 369dbafab157ae8a3a63e7d03620eda49455b22c..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/dist/sprintf.min.js.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"sprintf.min.js","sources":["../src/sprintf.js"],"names":["window","sprintf","key","arguments","cache","hasOwnProperty","parse","format","call","get_type","variable","Object","prototype","toString","slice","toLowerCase","str_repeat","input","multiplier","Array","join","re","not_string","number","json","not_json","text","modulo","placeholder","key_access","index_access","sign","parse_tree","argv","arg","i","k","match","pad","pad_character","pad_length","cursor","tree_length","length","node_type","output","is_positive","Error","test","isNaN","TypeError","String","fromCharCode","parseInt","JSON","stringify","toExponential","parseFloat","toFixed","substring","toUpperCase","replace","charAt","fmt","_fmt","arg_names","exec","SyntaxError","field_list","replacement_field","field_match","vsprintf","_argv","splice","apply","exports","define","amd","this"],"mappings":";;CAAA,SAAUA,GAeN,QAASC,KACL,GAAIC,GAAMC,UAAU,GAAIC,EAAQH,EAAQG,KAIxC,OAHMA,GAAMF,IAAQE,EAAMC,eAAeH,KACrCE,EAAMF,GAAOD,EAAQK,MAAMJ,IAExBD,EAAQM,OAAOC,KAAK,KAAMJ,EAAMF,GAAMC,WA4JjD,QAASM,GAASC,GACd,MAAOC,QAAOC,UAAUC,SAASL,KAAKE,GAAUI,MAAM,EAAG,IAAIC,cAGjE,QAASC,GAAWC,EAAOC,GACvB,MAAOC,OAAMD,EAAa,GAAGE,KAAKH,GApLtC,GAAII,IACAC,WAAY,OACZC,OAAQ,SACRC,KAAM,MACNC,SAAU,OACVC,KAAM,YACNC,OAAQ,WACRC,YAAa,yFACb1B,IAAK,sBACL2B,WAAY,wBACZC,aAAc,aACdC,KAAM,UAWV9B,GAAQM,OAAS,SAASyB,EAAYC,GAClC,GAAiEC,GAAkBC,EAAGC,EAAGC,EAAOC,EAAKC,EAAeC,EAAhHC,EAAS,EAAGC,EAAcV,EAAWW,OAAQC,EAAY,GAASC,KAA0DC,GAAc,EAAMf,EAAO,EAC3J,KAAKI,EAAI,EAAOO,EAAJP,EAAiBA,IAEzB,GADAS,EAAYnC,EAASuB,EAAWG,IACd,WAAdS,EACAC,EAAOA,EAAOF,QAAUX,EAAWG,OAElC,IAAkB,UAAdS,EAAuB,CAE5B,GADAP,EAAQL,EAAWG,GACfE,EAAM,GAEN,IADAH,EAAMD,EAAKQ,GACNL,EAAI,EAAGA,EAAIC,EAAM,GAAGM,OAAQP,IAAK,CAClC,IAAKF,EAAI7B,eAAegC,EAAM,GAAGD,IAC7B,KAAM,IAAIW,OAAM9C,EAAQ,yCAA0CoC,EAAM,GAAGD,IAE/EF,GAAMA,EAAIG,EAAM,GAAGD,QAIvBF,GADKG,EAAM,GACLJ,EAAKI,EAAM,IAGXJ,EAAKQ,IAOf,IAJqB,YAAjBhC,EAASyB,KACTA,EAAMA,KAGNb,EAAGC,WAAW0B,KAAKX,EAAM,KAAOhB,EAAGI,SAASuB,KAAKX,EAAM,KAAyB,UAAjB5B,EAASyB,IAAoBe,MAAMf,GAClG,KAAM,IAAIgB,WAAUjD,EAAQ,0CAA2CQ,EAASyB,IAOpF,QAJIb,EAAGE,OAAOyB,KAAKX,EAAM,MACrBS,EAAcZ,GAAO,GAGjBG,EAAM,IACV,IAAK,IACDH,EAAMA,EAAIrB,SAAS,EACvB,MACA,KAAK,IACDqB,EAAMiB,OAAOC,aAAalB,EAC9B,MACA,KAAK,IACL,IAAK,IACDA,EAAMmB,SAASnB,EAAK,GACxB,MACA,KAAK,IACDA,EAAMoB,KAAKC,UAAUrB,EAAK,KAAMG,EAAM,GAAKgB,SAAShB,EAAM,IAAM,EACpE,MACA,KAAK,IACDH,EAAMG,EAAM,GAAKH,EAAIsB,cAAcnB,EAAM,IAAMH,EAAIsB,eACvD,MACA,KAAK,IACDtB,EAAMG,EAAM,GAAKoB,WAAWvB,GAAKwB,QAAQrB,EAAM,IAAMoB,WAAWvB,EACpE,MACA,KAAK,IACDA,EAAMA,EAAIrB,SAAS,EACvB,MACA,KAAK,IACDqB,GAAQA,EAAMiB,OAAOjB,KAASG,EAAM,GAAKH,EAAIyB,UAAU,EAAGtB,EAAM,IAAMH,CAC1E,MACA,KAAK,IACDA,KAAc,CAClB,MACA,KAAK,IACDA,EAAMA,EAAIrB,SAAS,GACvB,MACA,KAAK,IACDqB,EAAMA,EAAIrB,SAAS,IAAI+C,cAG3BvC,EAAGG,KAAKwB,KAAKX,EAAM,IACnBQ,EAAOA,EAAOF,QAAUT,IAGpBb,EAAGE,OAAOyB,KAAKX,EAAM,KAASS,IAAeT,EAAM,GAKnDN,EAAO,IAJPA,EAAOe,EAAc,IAAM,IAC3BZ,EAAMA,EAAIrB,WAAWgD,QAAQxC,EAAGU,KAAM,KAK1CQ,EAAgBF,EAAM,GAAkB,MAAbA,EAAM,GAAa,IAAMA,EAAM,GAAGyB,OAAO,GAAK,IACzEtB,EAAaH,EAAM,IAAMN,EAAOG,GAAKS,OACrCL,EAAMD,EAAM,IAAMG,EAAa,EAAIxB,EAAWuB,EAAeC,GAAoB,GACjFK,EAAOA,EAAOF,QAAUN,EAAM,GAAKN,EAAOG,EAAMI,EAAyB,MAAlBC,EAAwBR,EAAOO,EAAMJ,EAAMI,EAAMP,EAAOG,GAI3H,MAAOW,GAAOzB,KAAK,KAGvBnB,EAAQG,SAERH,EAAQK,MAAQ,SAASyD,GAErB,IADA,GAAIC,GAAOD,EAAK1B,KAAYL,KAAiBiC,EAAY,EAClDD,GAAM,CACT,GAAqC,QAAhC3B,EAAQhB,EAAGK,KAAKwC,KAAKF,IACtBhC,EAAWA,EAAWW,QAAUN,EAAM,OAErC,IAAuC,QAAlCA,EAAQhB,EAAGM,OAAOuC,KAAKF,IAC7BhC,EAAWA,EAAWW,QAAU,QAE/B,CAAA,GAA4C,QAAvCN,EAAQhB,EAAGO,YAAYsC,KAAKF,IAgClC,KAAM,IAAIG,aAAY,mCA/BtB,IAAI9B,EAAM,GAAI,CACV4B
,GAAa,CACb,IAAIG,MAAiBC,EAAoBhC,EAAM,GAAIiC,IACnD,IAAuD,QAAlDA,EAAcjD,EAAGnB,IAAIgE,KAAKG,IAe3B,KAAM,IAAIF,aAAY,+CAbtB,KADAC,EAAWA,EAAWzB,QAAU2B,EAAY,GACwC,MAA5ED,EAAoBA,EAAkBV,UAAUW,EAAY,GAAG3B,UACnE,GAA8D,QAAzD2B,EAAcjD,EAAGQ,WAAWqC,KAAKG,IAClCD,EAAWA,EAAWzB,QAAU2B,EAAY,OAE3C,CAAA,GAAgE,QAA3DA,EAAcjD,EAAGS,aAAaoC,KAAKG,IAIzC,KAAM,IAAIF,aAAY,+CAHtBC,GAAWA,EAAWzB,QAAU2B,EAAY,GAUxDjC,EAAM,GAAK+B,MAGXH,IAAa,CAEjB,IAAkB,IAAdA,EACA,KAAM,IAAIlB,OAAM,4EAEpBf,GAAWA,EAAWW,QAAUN,EAKpC2B,EAAOA,EAAKL,UAAUtB,EAAM,GAAGM,QAEnC,MAAOX,GAGX,IAAIuC,GAAW,SAASR,EAAK9B,EAAMuC,GAG/B,MAFAA,IAASvC,OAAYnB,MAAM,GAC3B0D,EAAMC,OAAO,EAAG,EAAGV,GACZ9D,EAAQyE,MAAM,KAAMF,GAiBR,oBAAZG,UACPA,QAAQ1E,QAAUA,EAClB0E,QAAQJ,SAAWA,IAGnBvE,EAAOC,QAAUA,EACjBD,EAAOuE,SAAWA,EAEI,kBAAXK,SAAyBA,OAAOC,KACvCD,OAAO,WACH,OACI3E,QAASA,EACTsE,SAAUA,OAKT,mBAAXvE,QAAyB8E,KAAO9E"}
\ No newline at end of file
diff --git a/node_modules/sprintf-js/dist/sprintf.min.map b/node_modules/sprintf-js/dist/sprintf.min.map
deleted file mode 100644
index ee011aaa5aa56fb871e527f7579f34cb02bcf425..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/dist/sprintf.min.map
+++ /dev/null
@@ -1 +0,0 @@
-{"version":3,"file":"sprintf.min.js","sources":["../src/sprintf.js"],"names":["window","sprintf","key","arguments","cache","hasOwnProperty","parse","format","call","get_type","variable","Object","prototype","toString","slice","toLowerCase","str_repeat","input","multiplier","Array","join","re","not_string","number","json","not_json","text","modulo","placeholder","key_access","index_access","sign","parse_tree","argv","arg","i","k","match","pad","pad_character","pad_length","cursor","tree_length","length","node_type","output","is_positive","Error","test","isNaN","TypeError","String","fromCharCode","parseInt","JSON","stringify","toExponential","parseFloat","toFixed","toPrecision","substring","toUpperCase","replace","charAt","fmt","_fmt","arg_names","exec","SyntaxError","field_list","replacement_field","field_match","vsprintf","_argv","splice","apply","exports","define","amd","this"],"mappings":";;CAAA,SAAUA,GAeN,QAASC,KACL,GAAIC,GAAMC,UAAU,GAAIC,EAAQH,EAAQG,KAIxC,OAHMA,GAAMF,IAAQE,EAAMC,eAAeH,KACrCE,EAAMF,GAAOD,EAAQK,MAAMJ,IAExBD,EAAQM,OAAOC,KAAK,KAAMJ,EAAMF,GAAMC,WA+JjD,QAASM,GAASC,GACd,MAAOC,QAAOC,UAAUC,SAASL,KAAKE,GAAUI,MAAM,EAAG,IAAIC,cAGjE,QAASC,GAAWC,EAAOC,GACvB,MAAOC,OAAMD,EAAa,GAAGE,KAAKH,GAvLtC,GAAII,IACAC,WAAY,OACZC,OAAQ,UACRC,KAAM,MACNC,SAAU,OACVC,KAAM,YACNC,OAAQ,WACRC,YAAa,yFACb1B,IAAK,sBACL2B,WAAY,wBACZC,aAAc,aACdC,KAAM,UAWV9B,GAAQM,OAAS,SAASyB,EAAYC,GAClC,GAAiEC,GAAkBC,EAAGC,EAAGC,EAAOC,EAAKC,EAAeC,EAAhHC,EAAS,EAAGC,EAAcV,EAAWW,OAAQC,EAAY,GAASC,KAA0DC,GAAc,EAAMf,EAAO,EAC3J,KAAKI,EAAI,EAAOO,EAAJP,EAAiBA,IAEzB,GADAS,EAAYnC,EAASuB,EAAWG,IACd,WAAdS,EACAC,EAAOA,EAAOF,QAAUX,EAAWG,OAElC,IAAkB,UAAdS,EAAuB,CAE5B,GADAP,EAAQL,EAAWG,GACfE,EAAM,GAEN,IADAH,EAAMD,EAAKQ,GACNL,EAAI,EAAGA,EAAIC,EAAM,GAAGM,OAAQP,IAAK,CAClC,IAAKF,EAAI7B,eAAegC,EAAM,GAAGD,IAC7B,KAAM,IAAIW,OAAM9C,EAAQ,yCAA0CoC,EAAM,GAAGD,IAE/EF,GAAMA,EAAIG,EAAM,GAAGD,QAIvBF,GADKG,EAAM,GACLJ,EAAKI,EAAM,IAGXJ,EAAKQ,IAOf,IAJqB,YAAjBhC,EAASyB,KACTA,EAAMA,KAGNb,EAAGC,WAAW0B,KAAKX,EAAM,KAAOhB,EAAGI,SAASuB,KAAKX,EAAM,KAAyB,UAAjB5B,EAASyB,IAAoBe,MAAMf,GAClG,KAAM,IAAIgB,WAAUjD,EAAQ,0CAA2CQ,EAASyB,IAOpF,QAJIb,EAAGE,OAAOyB,KAAKX,EAAM,MACrBS,EAAcZ,GAAO,GAGjBG,EAAM,IACV,IAAK,IACDH,EAAMA,EAAIrB,SAAS,EACvB,MACA,KAAK,IACDqB,EAAMiB,OAAOC,aAAalB,EAC9B,MACA,KAAK,IACL,IAAK,IACDA,EAAMmB,SAASnB,EAAK,GACxB,MACA,KAAK,IACDA,EAAMoB,KAAKC,UAAUrB,EAAK,KAAMG,EAAM,GAAKgB,SAAShB,EAAM,IAAM,EACpE,MACA,KAAK,IACDH,EAAMG,EAAM,GAAKH,EAAIsB,cAAcnB,EAAM,IAAMH,EAAIsB,eACvD,MACA,KAAK,IACDtB,EAAMG,EAAM,GAAKoB,WAAWvB,GAAKwB,QAAQrB,EAAM,IAAMoB,WAAWvB,EACpE,MACA,KAAK,IACDA,EAAMG,EAAM,GAAKoB,WAAWvB,GAAKyB,YAAYtB,EAAM,IAAMoB,WAAWvB,EACxE,MACA,KAAK,IACDA,EAAMA,EAAIrB,SAAS,EACvB,MACA,KAAK,IACDqB,GAAQA,EAAMiB,OAAOjB,KAASG,EAAM,GAAKH,EAAI0B,UAAU,EAAGvB,EAAM,IAAMH,CAC1E,MACA,KAAK,IACDA,KAAc,CAClB,MACA,KAAK,IACDA,EAAMA,EAAIrB,SAAS,GACvB,MACA,KAAK,IACDqB,EAAMA,EAAIrB,SAAS,IAAIgD,cAG3BxC,EAAGG,KAAKwB,KAAKX,EAAM,IACnBQ,EAAOA,EAAOF,QAAUT,IAGpBb,EAAGE,OAAOyB,KAAKX,EAAM,KAASS,IAAeT,EAAM,GAKnDN,EAAO,IAJPA,EAAOe,EAAc,IAAM,IAC3BZ,EAAMA,EAAIrB,WAAWiD,QAAQzC,EAAGU,KAAM,KAK1CQ,EAAgBF,EAAM,GAAkB,MAAbA,EAAM,GAAa,IAAMA,EAAM,GAAG0B,OAAO,GAAK,IACzEvB,EAAaH,EAAM,IAAMN,EAAOG,GAAKS,OACrCL,EAAMD,EAAM,IAAMG,EAAa,EAAIxB,EAAWuB,EAAeC,GAAoB,GACjFK,EAAOA,EAAOF,QAAUN,EAAM,GAAKN,EAAOG,EAAMI,EAAyB,MAAlBC,EAAwBR,EAAOO,EAAMJ,EAAMI,EAAMP,EAAOG,GAI3H,MAAOW,GAAOzB,KAAK,KAGvBnB,EAAQG,SAERH,EAAQK,MAAQ,SAAS0D,GAErB,IADA,GAAIC,GAAOD,EAAK3B,KAAYL,KAAiBkC,EAAY,EAClDD,GAAM,CACT,GAAqC,QAAhC5B,EAAQhB,EAAGK,KAAKyC,KAAKF,IACtBjC,EAAWA,EAAWW,QAAUN,EAAM,OAErC,IAAuC,QAAlCA,EAAQhB,EAAGM,OAAOwC,KAAKF,IAC7BjC,EAAWA,EAAWW,QAAU,QAE/B,CAAA
,GAA4C,QAAvCN,EAAQhB,EAAGO,YAAYuC,KAAKF,IAgClC,KAAM,IAAIG,aAAY,mCA/BtB,IAAI/B,EAAM,GAAI,CACV6B,GAAa,CACb,IAAIG,MAAiBC,EAAoBjC,EAAM,GAAIkC,IACnD,IAAuD,QAAlDA,EAAclD,EAAGnB,IAAIiE,KAAKG,IAe3B,KAAM,IAAIF,aAAY,+CAbtB,KADAC,EAAWA,EAAW1B,QAAU4B,EAAY,GACwC,MAA5ED,EAAoBA,EAAkBV,UAAUW,EAAY,GAAG5B,UACnE,GAA8D,QAAzD4B,EAAclD,EAAGQ,WAAWsC,KAAKG,IAClCD,EAAWA,EAAW1B,QAAU4B,EAAY,OAE3C,CAAA,GAAgE,QAA3DA,EAAclD,EAAGS,aAAaqC,KAAKG,IAIzC,KAAM,IAAIF,aAAY,+CAHtBC,GAAWA,EAAW1B,QAAU4B,EAAY,GAUxDlC,EAAM,GAAKgC,MAGXH,IAAa,CAEjB,IAAkB,IAAdA,EACA,KAAM,IAAInB,OAAM,4EAEpBf,GAAWA,EAAWW,QAAUN,EAKpC4B,EAAOA,EAAKL,UAAUvB,EAAM,GAAGM,QAEnC,MAAOX,GAGX,IAAIwC,GAAW,SAASR,EAAK/B,EAAMwC,GAG/B,MAFAA,IAASxC,OAAYnB,MAAM,GAC3B2D,EAAMC,OAAO,EAAG,EAAGV,GACZ/D,EAAQ0E,MAAM,KAAMF,GAiBR,oBAAZG,UACPA,QAAQ3E,QAAUA,EAClB2E,QAAQJ,SAAWA,IAGnBxE,EAAOC,QAAUA,EACjBD,EAAOwE,SAAWA,EAEI,kBAAXK,SAAyBA,OAAOC,KACvCD,OAAO,WACH,OACI5E,QAASA,EACTuE,SAAUA,OAKT,mBAAXxE,QAAyB+E,KAAO/E"}
\ No newline at end of file
diff --git a/node_modules/sprintf-js/gruntfile.js b/node_modules/sprintf-js/gruntfile.js
deleted file mode 100644
index 246e1c3b9801fc8d687d8c03823b2cf5149841af..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/gruntfile.js
+++ /dev/null
@@ -1,36 +0,0 @@
-module.exports = function(grunt) {
- grunt.initConfig({
- pkg: grunt.file.readJSON("package.json"),
-
- uglify: {
- options: {
- banner: "/*! <%= pkg.name %> | <%= pkg.author %> | <%= pkg.license %> */\n",
- sourceMap: true
- },
- build: {
- files: [
- {
- src: "src/sprintf.js",
- dest: "dist/sprintf.min.js"
- },
- {
- src: "src/angular-sprintf.js",
- dest: "dist/angular-sprintf.min.js"
- }
- ]
- }
- },
-
- watch: {
- js: {
- files: "src/*.js",
- tasks: ["uglify"]
- }
- }
- })
-
- grunt.loadNpmTasks("grunt-contrib-uglify")
- grunt.loadNpmTasks("grunt-contrib-watch")
-
- grunt.registerTask("default", ["uglify", "watch"])
-}
diff --git a/node_modules/sprintf-js/package.json b/node_modules/sprintf-js/package.json
deleted file mode 100644
index d460fd241e12de8d7144fbb059c8313d8ea44f17..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/package.json
+++ /dev/null
@@ -1,85 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "sprintf-js",
- "raw": "sprintf-js@~1.0.2",
- "rawSpec": "~1.0.2",
- "scope": null,
- "spec": ">=1.0.2 <1.1.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/argparse"
- ]
- ],
- "_from": "sprintf-js@>=1.0.2 <1.1.0",
- "_id": "sprintf-js@1.0.3",
- "_inCache": true,
- "_installable": true,
- "_location": "/sprintf-js",
- "_nodeVersion": "0.12.4",
- "_npmUser": {
- "email": "hello@alexei.ro",
- "name": "alexei"
- },
- "_npmVersion": "2.10.1",
- "_phantomChildren": {},
- "_requested": {
- "name": "sprintf-js",
- "raw": "sprintf-js@~1.0.2",
- "rawSpec": "~1.0.2",
- "scope": null,
- "spec": ">=1.0.2 <1.1.0",
- "type": "range"
- },
- "_requiredBy": [
- "/argparse"
- ],
- "_resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
- "_shasum": "04e6926f662895354f3dd015203633b857297e2c",
- "_shrinkwrap": null,
- "_spec": "sprintf-js@~1.0.2",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/argparse",
- "author": {
- "email": "hello@alexei.ro",
- "name": "Alexandru Marasteanu",
- "url": "http://alexei.ro/"
- },
- "bugs": {
- "url": "https://github.com/alexei/sprintf.js/issues"
- },
- "dependencies": {},
- "description": "JavaScript sprintf implementation",
- "devDependencies": {
- "grunt": "*",
- "grunt-contrib-uglify": "*",
- "grunt-contrib-watch": "*",
- "mocha": "*"
- },
- "directories": {},
- "dist": {
- "shasum": "04e6926f662895354f3dd015203633b857297e2c",
- "tarball": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz"
- },
- "gitHead": "747b806c2dab5b64d5c9958c42884946a187c3b1",
- "homepage": "https://github.com/alexei/sprintf.js#readme",
- "license": "BSD-3-Clause",
- "main": "src/sprintf.js",
- "maintainers": [
- {
- "email": "hello@alexei.ro",
- "name": "alexei"
- }
- ],
- "name": "sprintf-js",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git+https://github.com/alexei/sprintf.js.git"
- },
- "scripts": {
- "test": "mocha test/test.js"
- },
- "version": "1.0.3"
-}
diff --git a/node_modules/sprintf-js/src/angular-sprintf.js b/node_modules/sprintf-js/src/angular-sprintf.js
deleted file mode 100644
index 9c69123bea291aa65dfa924c63b1221fd14d55aa..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/src/angular-sprintf.js
+++ /dev/null
@@ -1,18 +0,0 @@
-angular.
- module("sprintf", []).
- filter("sprintf", function() {
- return function() {
- return sprintf.apply(null, arguments)
- }
- }).
- filter("fmt", ["$filter", function($filter) {
- return $filter("sprintf")
- }]).
- filter("vsprintf", function() {
- return function(format, argv) {
- return vsprintf(format, argv)
- }
- }).
- filter("vfmt", ["$filter", function($filter) {
- return $filter("vsprintf")
- }])
diff --git a/node_modules/sprintf-js/src/sprintf.js b/node_modules/sprintf-js/src/sprintf.js
deleted file mode 100644
index c0fc7c08b2e6fe38de2883034b8d28c8f34e9866..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/src/sprintf.js
+++ /dev/null
@@ -1,208 +0,0 @@
-(function(window) {
- var re = {
- not_string: /[^s]/,
- number: /[diefg]/,
- json: /[j]/,
- not_json: /[^j]/,
- text: /^[^\x25]+/,
- modulo: /^\x25{2}/,
- placeholder: /^\x25(?:([1-9]\d*)\$|\(([^\)]+)\))?(\+)?(0|'[^$])?(-)?(\d+)?(?:\.(\d+))?([b-gijosuxX])/,
- key: /^([a-z_][a-z_\d]*)/i,
- key_access: /^\.([a-z_][a-z_\d]*)/i,
- index_access: /^\[(\d+)\]/,
- sign: /^[\+\-]/
- }
-
- function sprintf() {
- var key = arguments[0], cache = sprintf.cache
- if (!(cache[key] && cache.hasOwnProperty(key))) {
- cache[key] = sprintf.parse(key)
- }
- return sprintf.format.call(null, cache[key], arguments)
- }
-
- sprintf.format = function(parse_tree, argv) {
- var cursor = 1, tree_length = parse_tree.length, node_type = "", arg, output = [], i, k, match, pad, pad_character, pad_length, is_positive = true, sign = ""
- for (i = 0; i < tree_length; i++) {
- node_type = get_type(parse_tree[i])
- if (node_type === "string") {
- output[output.length] = parse_tree[i]
- }
- else if (node_type === "array") {
- match = parse_tree[i] // convenience purposes only
- if (match[2]) { // keyword argument
- arg = argv[cursor]
- for (k = 0; k < match[2].length; k++) {
- if (!arg.hasOwnProperty(match[2][k])) {
- throw new Error(sprintf("[sprintf] property '%s' does not exist", match[2][k]))
- }
- arg = arg[match[2][k]]
- }
- }
- else if (match[1]) { // positional argument (explicit)
- arg = argv[match[1]]
- }
- else { // positional argument (implicit)
- arg = argv[cursor++]
- }
-
- if (get_type(arg) == "function") {
- arg = arg()
- }
-
- if (re.not_string.test(match[8]) && re.not_json.test(match[8]) && (get_type(arg) != "number" && isNaN(arg))) {
- throw new TypeError(sprintf("[sprintf] expecting number but found %s", get_type(arg)))
- }
-
- if (re.number.test(match[8])) {
- is_positive = arg >= 0
- }
-
- switch (match[8]) {
- case "b":
- arg = arg.toString(2)
- break
- case "c":
- arg = String.fromCharCode(arg)
- break
- case "d":
- case "i":
- arg = parseInt(arg, 10)
- break
- case "j":
- arg = JSON.stringify(arg, null, match[6] ? parseInt(match[6]) : 0)
- break
- case "e":
- arg = match[7] ? arg.toExponential(match[7]) : arg.toExponential()
- break
- case "f":
- arg = match[7] ? parseFloat(arg).toFixed(match[7]) : parseFloat(arg)
- break
- case "g":
- arg = match[7] ? parseFloat(arg).toPrecision(match[7]) : parseFloat(arg)
- break
- case "o":
- arg = arg.toString(8)
- break
- case "s":
- arg = ((arg = String(arg)) && match[7] ? arg.substring(0, match[7]) : arg)
- break
- case "u":
- arg = arg >>> 0
- break
- case "x":
- arg = arg.toString(16)
- break
- case "X":
- arg = arg.toString(16).toUpperCase()
- break
- }
- if (re.json.test(match[8])) {
- output[output.length] = arg
- }
- else {
- if (re.number.test(match[8]) && (!is_positive || match[3])) {
- sign = is_positive ? "+" : "-"
- arg = arg.toString().replace(re.sign, "")
- }
- else {
- sign = ""
- }
- pad_character = match[4] ? match[4] === "0" ? "0" : match[4].charAt(1) : " "
- pad_length = match[6] - (sign + arg).length
- pad = match[6] ? (pad_length > 0 ? str_repeat(pad_character, pad_length) : "") : ""
- output[output.length] = match[5] ? sign + arg + pad : (pad_character === "0" ? sign + pad + arg : pad + sign + arg)
- }
- }
- }
- return output.join("")
- }
-
- sprintf.cache = {}
-
- sprintf.parse = function(fmt) {
- var _fmt = fmt, match = [], parse_tree = [], arg_names = 0
- while (_fmt) {
- if ((match = re.text.exec(_fmt)) !== null) {
- parse_tree[parse_tree.length] = match[0]
- }
- else if ((match = re.modulo.exec(_fmt)) !== null) {
- parse_tree[parse_tree.length] = "%"
- }
- else if ((match = re.placeholder.exec(_fmt)) !== null) {
- if (match[2]) {
- arg_names |= 1
- var field_list = [], replacement_field = match[2], field_match = []
- if ((field_match = re.key.exec(replacement_field)) !== null) {
- field_list[field_list.length] = field_match[1]
- while ((replacement_field = replacement_field.substring(field_match[0].length)) !== "") {
- if ((field_match = re.key_access.exec(replacement_field)) !== null) {
- field_list[field_list.length] = field_match[1]
- }
- else if ((field_match = re.index_access.exec(replacement_field)) !== null) {
- field_list[field_list.length] = field_match[1]
- }
- else {
- throw new SyntaxError("[sprintf] failed to parse named argument key")
- }
- }
- }
- else {
- throw new SyntaxError("[sprintf] failed to parse named argument key")
- }
- match[2] = field_list
- }
- else {
- arg_names |= 2
- }
- if (arg_names === 3) {
- throw new Error("[sprintf] mixing positional and named placeholders is not (yet) supported")
- }
- parse_tree[parse_tree.length] = match
- }
- else {
- throw new SyntaxError("[sprintf] unexpected placeholder")
- }
- _fmt = _fmt.substring(match[0].length)
- }
- return parse_tree
- }
-
- var vsprintf = function(fmt, argv, _argv) {
- _argv = (argv || []).slice(0)
- _argv.splice(0, 0, fmt)
- return sprintf.apply(null, _argv)
- }
-
- /**
- * helpers
- */
- function get_type(variable) {
- return Object.prototype.toString.call(variable).slice(8, -1).toLowerCase()
- }
-
- function str_repeat(input, multiplier) {
- return Array(multiplier + 1).join(input)
- }
-
- /**
- * export to either browser or node.js
- */
- if (typeof exports !== "undefined") {
- exports.sprintf = sprintf
- exports.vsprintf = vsprintf
- }
- else {
- window.sprintf = sprintf
- window.vsprintf = vsprintf
-
- if (typeof define === "function" && define.amd) {
- define(function() {
- return {
- sprintf: sprintf,
- vsprintf: vsprintf
- }
- })
- }
- }
-})(typeof window === "undefined" ? this : window);
diff --git a/node_modules/sprintf-js/test/test.js b/node_modules/sprintf-js/test/test.js
deleted file mode 100644
index 6f57b2538c8522137ff3a8dd1b826781e0fce41c..0000000000000000000000000000000000000000
--- a/node_modules/sprintf-js/test/test.js
+++ /dev/null
@@ -1,82 +0,0 @@
-var assert = require("assert"),
- sprintfjs = require("../src/sprintf.js"),
- sprintf = sprintfjs.sprintf,
- vsprintf = sprintfjs.vsprintf
-
-describe("sprintfjs", function() {
- var pi = 3.141592653589793
-
- it("should return formated strings for simple placeholders", function() {
- assert.equal("%", sprintf("%%"))
- assert.equal("10", sprintf("%b", 2))
- assert.equal("A", sprintf("%c", 65))
- assert.equal("2", sprintf("%d", 2))
- assert.equal("2", sprintf("%i", 2))
- assert.equal("2", sprintf("%d", "2"))
- assert.equal("2", sprintf("%i", "2"))
- assert.equal('{"foo":"bar"}', sprintf("%j", {foo: "bar"}))
- assert.equal('["foo","bar"]', sprintf("%j", ["foo", "bar"]))
- assert.equal("2e+0", sprintf("%e", 2))
- assert.equal("2", sprintf("%u", 2))
- assert.equal("4294967294", sprintf("%u", -2))
- assert.equal("2.2", sprintf("%f", 2.2))
- assert.equal("3.141592653589793", sprintf("%g", pi))
- assert.equal("10", sprintf("%o", 8))
- assert.equal("%s", sprintf("%s", "%s"))
- assert.equal("ff", sprintf("%x", 255))
- assert.equal("FF", sprintf("%X", 255))
- assert.equal("Polly wants a cracker", sprintf("%2$s %3$s a %1$s", "cracker", "Polly", "wants"))
- assert.equal("Hello world!", sprintf("Hello %(who)s!", {"who": "world"}))
- })
-
- it("should return formated strings for complex placeholders", function() {
- // sign
- assert.equal("2", sprintf("%d", 2))
- assert.equal("-2", sprintf("%d", -2))
- assert.equal("+2", sprintf("%+d", 2))
- assert.equal("-2", sprintf("%+d", -2))
- assert.equal("2", sprintf("%i", 2))
- assert.equal("-2", sprintf("%i", -2))
- assert.equal("+2", sprintf("%+i", 2))
- assert.equal("-2", sprintf("%+i", -2))
- assert.equal("2.2", sprintf("%f", 2.2))
- assert.equal("-2.2", sprintf("%f", -2.2))
- assert.equal("+2.2", sprintf("%+f", 2.2))
- assert.equal("-2.2", sprintf("%+f", -2.2))
- assert.equal("-2.3", sprintf("%+.1f", -2.34))
- assert.equal("-0.0", sprintf("%+.1f", -0.01))
- assert.equal("3.14159", sprintf("%.6g", pi))
- assert.equal("3.14", sprintf("%.3g", pi))
- assert.equal("3", sprintf("%.1g", pi))
- assert.equal("-000000123", sprintf("%+010d", -123))
- assert.equal("______-123", sprintf("%+'_10d", -123))
- assert.equal("-234.34 123.2", sprintf("%f %f", -234.34, 123.2))
-
- // padding
- assert.equal("-0002", sprintf("%05d", -2))
- assert.equal("-0002", sprintf("%05i", -2))
- assert.equal(" <", sprintf("%5s", "<"))
- assert.equal("0000<", sprintf("%05s", "<"))
- assert.equal("____<", sprintf("%'_5s", "<"))
- assert.equal("> ", sprintf("%-5s", ">"))
- assert.equal(">0000", sprintf("%0-5s", ">"))
- assert.equal(">____", sprintf("%'_-5s", ">"))
- assert.equal("xxxxxx", sprintf("%5s", "xxxxxx"))
- assert.equal("1234", sprintf("%02u", 1234))
- assert.equal(" -10.235", sprintf("%8.3f", -10.23456))
- assert.equal("-12.34 xxx", sprintf("%f %s", -12.34, "xxx"))
- assert.equal('{\n "foo": "bar"\n}', sprintf("%2j", {foo: "bar"}))
- assert.equal('[\n "foo",\n "bar"\n]', sprintf("%2j", ["foo", "bar"]))
-
- // precision
- assert.equal("2.3", sprintf("%.1f", 2.345))
- assert.equal("xxxxx", sprintf("%5.5s", "xxxxxx"))
- assert.equal(" x", sprintf("%5.1s", "xxxxxx"))
-
- })
-
- it("should return formated strings for callbacks", function() {
- assert.equal("foobar", sprintf("%s", function() { return "foobar" }))
- assert.equal(Date.now(), sprintf("%s", Date.now)) // should pass...
- })
-})
diff --git a/node_modules/string_decoder/LICENSE b/node_modules/string_decoder/LICENSE
deleted file mode 100644
index 778edb20730ef48c01002248f4d51e7752c13487..0000000000000000000000000000000000000000
--- a/node_modules/string_decoder/LICENSE
+++ /dev/null
@@ -1,48 +0,0 @@
-Node.js is licensed for use as follows:
-
-"""
-Copyright Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
-This license applies to parts of Node.js originating from the
-https://github.com/joyent/node repository:
-
-"""
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
diff --git a/node_modules/string_decoder/README.md b/node_modules/string_decoder/README.md
deleted file mode 100644
index 5fd58315ed588027742dde690a31cd0a2610649d..0000000000000000000000000000000000000000
--- a/node_modules/string_decoder/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# string_decoder
-
-***Node-core v8.9.4 string_decoder for userland***
-
-
-[![NPM](https://nodei.co/npm/string_decoder.png?downloads=true&downloadRank=true)](https://nodei.co/npm/string_decoder/)
-[![NPM](https://nodei.co/npm-dl/string_decoder.png?&months=6&height=3)](https://nodei.co/npm/string_decoder/)
-
-
-```bash
-npm install --save string_decoder
-```
-
-***Node-core string_decoder for userland***
-
-This package is a mirror of the string_decoder implementation in Node-core.
-
-Full documentation may be found on the [Node.js website](https://nodejs.org/dist/v8.9.4/docs/api/).
-
-As of version 1.0.0 **string_decoder** uses semantic versioning.
-
-## Previous versions
-
-Previous version numbers match the versions found in Node core, e.g. 0.10.24 matches Node 0.10.24, likewise 0.11.10 matches Node 0.11.10.
-
-## Update
-
-The *build/* directory contains a build script that will scrape the source from the [nodejs/node](https://github.com/nodejs/node) repo given a specific Node version.
-
-## Streams Working Group
-
-`string_decoder` is maintained by the Streams Working Group, which
-oversees the development and maintenance of the Streams API within
-Node.js. The responsibilities of the Streams Working Group include:
-
-* Addressing stream issues on the Node.js issue tracker.
-* Authoring and editing stream documentation within the Node.js project.
-* Reviewing changes to stream subclasses within the Node.js project.
-* Redirecting changes to streams from the Node.js project to this
- project.
-* Assisting in the implementation of stream providers within Node.js.
-* Recommending versions of `readable-stream` to be included in Node.js.
-* Messaging about the future of streams to give the community advance
- notice of changes.
-
-See [readable-stream](https://github.com/nodejs/readable-stream) for
-more details.
diff --git a/node_modules/string_decoder/lib/string_decoder.js b/node_modules/string_decoder/lib/string_decoder.js
deleted file mode 100644
index 2e89e63f7933e42b8ba543ede35d2a8fa3e4f100..0000000000000000000000000000000000000000
--- a/node_modules/string_decoder/lib/string_decoder.js
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright Joyent, Inc. and other Node contributors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-// USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-'use strict';
-
-/**/
-
-var Buffer = require('safe-buffer').Buffer;
-/**/
-
-var isEncoding = Buffer.isEncoding || function (encoding) {
- encoding = '' + encoding;
- switch (encoding && encoding.toLowerCase()) {
- case 'hex':case 'utf8':case 'utf-8':case 'ascii':case 'binary':case 'base64':case 'ucs2':case 'ucs-2':case 'utf16le':case 'utf-16le':case 'raw':
- return true;
- default:
- return false;
- }
-};
-
-function _normalizeEncoding(enc) {
- if (!enc) return 'utf8';
- var retried;
- while (true) {
- switch (enc) {
- case 'utf8':
- case 'utf-8':
- return 'utf8';
- case 'ucs2':
- case 'ucs-2':
- case 'utf16le':
- case 'utf-16le':
- return 'utf16le';
- case 'latin1':
- case 'binary':
- return 'latin1';
- case 'base64':
- case 'ascii':
- case 'hex':
- return enc;
- default:
- if (retried) return; // undefined
- enc = ('' + enc).toLowerCase();
- retried = true;
- }
- }
-};
-
-// Do not cache `Buffer.isEncoding` when checking encoding names as some
-// modules monkey-patch it to support additional encodings
-function normalizeEncoding(enc) {
- var nenc = _normalizeEncoding(enc);
- if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc);
- return nenc || enc;
-}
-
-// StringDecoder provides an interface for efficiently splitting a series of
-// buffers into a series of JS strings without breaking apart multi-byte
-// characters.
-exports.StringDecoder = StringDecoder;
-function StringDecoder(encoding) {
- this.encoding = normalizeEncoding(encoding);
- var nb;
- switch (this.encoding) {
- case 'utf16le':
- this.text = utf16Text;
- this.end = utf16End;
- nb = 4;
- break;
- case 'utf8':
- this.fillLast = utf8FillLast;
- nb = 4;
- break;
- case 'base64':
- this.text = base64Text;
- this.end = base64End;
- nb = 3;
- break;
- default:
- this.write = simpleWrite;
- this.end = simpleEnd;
- return;
- }
- this.lastNeed = 0;
- this.lastTotal = 0;
- this.lastChar = Buffer.allocUnsafe(nb);
-}
-
-StringDecoder.prototype.write = function (buf) {
- if (buf.length === 0) return '';
- var r;
- var i;
- if (this.lastNeed) {
- r = this.fillLast(buf);
- if (r === undefined) return '';
- i = this.lastNeed;
- this.lastNeed = 0;
- } else {
- i = 0;
- }
- if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i);
- return r || '';
-};
-
-StringDecoder.prototype.end = utf8End;
-
-// Returns only complete characters in a Buffer
-StringDecoder.prototype.text = utf8Text;
-
-// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer
-StringDecoder.prototype.fillLast = function (buf) {
- if (this.lastNeed <= buf.length) {
- buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed);
- return this.lastChar.toString(this.encoding, 0, this.lastTotal);
- }
- buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length);
- this.lastNeed -= buf.length;
-};
-
-// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a
-// continuation byte. If an invalid byte is detected, -2 is returned.
-function utf8CheckByte(byte) {
- if (byte <= 0x7F) return 0;else if (byte >> 5 === 0x06) return 2;else if (byte >> 4 === 0x0E) return 3;else if (byte >> 3 === 0x1E) return 4;
- return byte >> 6 === 0x02 ? -1 : -2;
-}
-
-// Checks at most 3 bytes at the end of a Buffer in order to detect an
-// incomplete multi-byte UTF-8 character. The total number of bytes (2, 3, or 4)
-// needed to complete the UTF-8 character (if applicable) are returned.
-function utf8CheckIncomplete(self, buf, i) {
- var j = buf.length - 1;
- if (j < i) return 0;
- var nb = utf8CheckByte(buf[j]);
- if (nb >= 0) {
- if (nb > 0) self.lastNeed = nb - 1;
- return nb;
- }
- if (--j < i || nb === -2) return 0;
- nb = utf8CheckByte(buf[j]);
- if (nb >= 0) {
- if (nb > 0) self.lastNeed = nb - 2;
- return nb;
- }
- if (--j < i || nb === -2) return 0;
- nb = utf8CheckByte(buf[j]);
- if (nb >= 0) {
- if (nb > 0) {
- if (nb === 2) nb = 0;else self.lastNeed = nb - 3;
- }
- return nb;
- }
- return 0;
-}
-
-// Validates as many continuation bytes for a multi-byte UTF-8 character as
-// needed or are available. If we see a non-continuation byte where we expect
-// one, we "replace" the validated continuation bytes we've seen so far with
-// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding
-// behavior. The continuation byte check is included three times in the case
-// where all of the continuation bytes for a character exist in the same buffer.
-// It is also done this way as a slight performance increase instead of using a
-// loop.
-function utf8CheckExtraBytes(self, buf, p) {
- if ((buf[0] & 0xC0) !== 0x80) {
- self.lastNeed = 0;
- return '\ufffd';
- }
- if (self.lastNeed > 1 && buf.length > 1) {
- if ((buf[1] & 0xC0) !== 0x80) {
- self.lastNeed = 1;
- return '\ufffd';
- }
- if (self.lastNeed > 2 && buf.length > 2) {
- if ((buf[2] & 0xC0) !== 0x80) {
- self.lastNeed = 2;
- return '\ufffd';
- }
- }
- }
-}
-
-// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer.
-function utf8FillLast(buf) {
- var p = this.lastTotal - this.lastNeed;
- var r = utf8CheckExtraBytes(this, buf, p);
- if (r !== undefined) return r;
- if (this.lastNeed <= buf.length) {
- buf.copy(this.lastChar, p, 0, this.lastNeed);
- return this.lastChar.toString(this.encoding, 0, this.lastTotal);
- }
- buf.copy(this.lastChar, p, 0, buf.length);
- this.lastNeed -= buf.length;
-}
-
-// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a
-// partial character, the character's bytes are buffered until the required
-// number of bytes are available.
-function utf8Text(buf, i) {
- var total = utf8CheckIncomplete(this, buf, i);
- if (!this.lastNeed) return buf.toString('utf8', i);
- this.lastTotal = total;
- var end = buf.length - (total - this.lastNeed);
- buf.copy(this.lastChar, 0, end);
- return buf.toString('utf8', i, end);
-}
-
-// For UTF-8, a replacement character is added when ending on a partial
-// character.
-function utf8End(buf) {
- var r = buf && buf.length ? this.write(buf) : '';
- if (this.lastNeed) return r + '\ufffd';
- return r;
-}
-
-// UTF-16LE typically needs two bytes per character, but even if we have an even
-// number of bytes available, we need to check if we end on a leading/high
-// surrogate. In that case, we need to wait for the next two bytes in order to
-// decode the last character properly.
-function utf16Text(buf, i) {
- if ((buf.length - i) % 2 === 0) {
- var r = buf.toString('utf16le', i);
- if (r) {
- var c = r.charCodeAt(r.length - 1);
- if (c >= 0xD800 && c <= 0xDBFF) {
- this.lastNeed = 2;
- this.lastTotal = 4;
- this.lastChar[0] = buf[buf.length - 2];
- this.lastChar[1] = buf[buf.length - 1];
- return r.slice(0, -1);
- }
- }
- return r;
- }
- this.lastNeed = 1;
- this.lastTotal = 2;
- this.lastChar[0] = buf[buf.length - 1];
- return buf.toString('utf16le', i, buf.length - 1);
-}
-
-// For UTF-16LE we do not explicitly append special replacement characters if we
-// end on a partial character, we simply let v8 handle that.
-function utf16End(buf) {
- var r = buf && buf.length ? this.write(buf) : '';
- if (this.lastNeed) {
- var end = this.lastTotal - this.lastNeed;
- return r + this.lastChar.toString('utf16le', 0, end);
- }
- return r;
-}
-
-function base64Text(buf, i) {
- var n = (buf.length - i) % 3;
- if (n === 0) return buf.toString('base64', i);
- this.lastNeed = 3 - n;
- this.lastTotal = 3;
- if (n === 1) {
- this.lastChar[0] = buf[buf.length - 1];
- } else {
- this.lastChar[0] = buf[buf.length - 2];
- this.lastChar[1] = buf[buf.length - 1];
- }
- return buf.toString('base64', i, buf.length - n);
-}
-
-function base64End(buf) {
- var r = buf && buf.length ? this.write(buf) : '';
- if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed);
- return r;
-}
-
-// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex)
-function simpleWrite(buf) {
- return buf.toString(this.encoding);
-}
-
-function simpleEnd(buf) {
- return buf && buf.length ? this.write(buf) : '';
-}
\ No newline at end of file
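The buffering described in the comments above is easiest to see with a multi-byte character split across two writes. A minimal sketch, using Node's built-in `string_decoder` (which the deleted package mirrors), with a three-byte '€' split after its first byte; the variable names are illustrative only:

```js
const { StringDecoder } = require('string_decoder')

const decoder = new StringDecoder('utf8')
const euro = Buffer.from([0xE2, 0x82, 0xAC]) // '€' is three bytes in UTF-8

console.log(decoder.write(euro.slice(0, 1))) // '' – incomplete, the byte is buffered
console.log(decoder.write(euro.slice(1)))    // '€' – completed on the next write
console.log(decoder.end())                   // '' – nothing left over
```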
diff --git a/node_modules/string_decoder/package.json b/node_modules/string_decoder/package.json
deleted file mode 100644
index 7d0104d99e424f0417b714bc301a09fcf4a6454d..0000000000000000000000000000000000000000
--- a/node_modules/string_decoder/package.json
+++ /dev/null
@@ -1,114 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "string_decoder",
- "raw": "string_decoder@^1.1.1",
- "rawSpec": "^1.1.1",
- "scope": null,
- "spec": ">=1.1.1 <2.0.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/readable-stream"
- ]
- ],
- "_from": "string_decoder@>=1.1.1 <2.0.0",
- "_hasShrinkwrap": false,
- "_id": "string_decoder@1.3.0",
- "_inCache": true,
- "_installable": true,
- "_location": "/string_decoder",
- "_nodeVersion": "10.16.0",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/string_decoder_1.3.0_1565169636487_0.6257381665304729"
- },
- "_npmUser": {
- "email": "hello@matteocollina.com",
- "name": "matteo.collina"
- },
- "_npmVersion": "6.10.2",
- "_phantomChildren": {},
- "_requested": {
- "name": "string_decoder",
- "raw": "string_decoder@^1.1.1",
- "rawSpec": "^1.1.1",
- "scope": null,
- "spec": ">=1.1.1 <2.0.0",
- "type": "range"
- },
- "_requiredBy": [
- "/readable-stream"
- ],
- "_resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
- "_shasum": "42f114594a46cf1a8e30b0a84f56c78c3edac21e",
- "_shrinkwrap": null,
- "_spec": "string_decoder@^1.1.1",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/readable-stream",
- "bugs": {
- "url": "https://github.com/nodejs/string_decoder/issues"
- },
- "dependencies": {
- "safe-buffer": "~5.2.0"
- },
- "description": "The string_decoder module from Node core",
- "devDependencies": {
- "babel-polyfill": "^6.23.0",
- "core-util-is": "^1.0.2",
- "inherits": "^2.0.3",
- "tap": "~0.4.8"
- },
- "directories": {},
- "dist": {
- "fileCount": 4,
- "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
- "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJdSpflCRA9TVsSAnZWagAAVE8QAIBhyQ4HL/C719h6NHEm\noaNdOGDpxw0zyapxhztRy+YPAEGVOAnS5L/VMQINojzPQQ8YTZDM5jQ5+NyC\naVBB9KtQpCXftizA0DX5NbowL/k9s1ko+PFf0/hOPA4NWhZ9H5Vym+fNNMQJ\n+p0leE2ovcz0PVT8t8/IrntVTYRnhkDnkTJa9byWGZFiumoR4J/y0k/UfuR4\nqKF9NGgDURJrAvdkbaBEc4xs4ZM2Eqfevxki/Ia6cV2eWO/ssWzZXdS0oLfN\ncPieD7D80jTnrZNnh6iEdYFU6GQDgu8ovDURIVnI3qowB/aDBpMAMpKliY32\nMyB1VSAWPADAYwHaW6ldS23Bd1fFA9LJZtL4/2YmR5lTri14j6szJdXQZOCE\n26kivguwcdEsbsDkE4ayRJ6DNC18lvcmJCyBUpHPBzEFP8HvC+En3Cxd4wAE\n+6laaHp7x+Af3KV4cnBsKHsDpugmzi6SWKMjt9ymySNfefvzll7pI5p8mQD1\niFM92XvnHY1RjXtuSwSHo/U8F9B/EogCC4kRSXSyq1xSDvZ530BaWsX27iv4\n2kbmRndn4pImKpIzFsUNaG8IAs90bw3mjc+dmMi7y/mbid5+xesX5RCztajK\nfFWZhV46ePff4+YFJVtCKR/m2LCu6dbSLHxTxpdFVfcjU83qnzpWGYdbHWwG\nxT/z\r\n=TD7t\r\n-----END PGP SIGNATURE-----\r\n",
- "shasum": "42f114594a46cf1a8e30b0a84f56c78c3edac21e",
- "tarball": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
- "unpackedSize": 14427
- },
- "gitHead": "60db81e031c126112039157ba9437484b1329dff",
- "homepage": "https://github.com/nodejs/string_decoder",
- "keywords": [
- "string",
- "decoder",
- "browser",
- "browserify"
- ],
- "license": "MIT",
- "main": "lib/string_decoder.js",
- "maintainers": [
- {
- "email": "calvin.metcalf@gmail.com",
- "name": "cwmma"
- },
- {
- "email": "hello@matteocollina.com",
- "name": "matteo.collina"
- },
- {
- "email": "build@iojs.org",
- "name": "nodejs-foundation"
- },
- {
- "email": "rod@vagg.org",
- "name": "rvagg"
- },
- {
- "email": "substack@gmail.com",
- "name": "substack"
- }
- ],
- "name": "string_decoder",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git://github.com/nodejs/string_decoder.git"
- },
- "scripts": {
- "ci": "tap test/parallel/*.js test/ours/*.js --tap | tee test.tap && node test/verify-dependencies.js",
- "test": "tap test/parallel/*.js && node test/verify-dependencies"
- },
- "version": "1.3.0"
-}
diff --git a/node_modules/through2/LICENSE.md b/node_modules/through2/LICENSE.md
deleted file mode 100644
index a2429b6385be26e4b2e4048c6359865d7429918a..0000000000000000000000000000000000000000
--- a/node_modules/through2/LICENSE.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# The MIT License (MIT)
-
-**Copyright (c) Rod Vagg (the "Original Author") and additional contributors**
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/node_modules/through2/README.md b/node_modules/through2/README.md
deleted file mode 100644
index b5e44c7b178a8a075dd7bc7355efb2727bd6639b..0000000000000000000000000000000000000000
--- a/node_modules/through2/README.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# through2
-
-[![NPM](https://nodei.co/npm/through2.png?downloads&downloadRank)](https://nodei.co/npm/through2/)
-
-**A tiny wrapper around Node streams.Transform (Streams2/3) to avoid explicit subclassing noise**
-
-Inspired by [Dominic Tarr](https://github.com/dominictarr)'s [through](https://github.com/dominictarr/through) in that it's so much easier to make a stream out of a function than it is to set up the prototype chain properly: `through(function (chunk) { ... })`.
-
-Note: As of 2.x.x this module uses **Streams3** instead of Streams2. To continue using a Streams2 version, use `npm install through2@0` to fetch the latest 0.x.x release. For more information about Streams2 vs Streams3 and related recommendations, see the article **[Why I don't use Node's core 'stream' module](http://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html)**.
-
-```js
-fs.createReadStream('ex.txt')
- .pipe(through2(function (chunk, enc, callback) {
- for (var i = 0; i < chunk.length; i++)
- if (chunk[i] == 97)
- chunk[i] = 122 // swap 'a' for 'z'
-
- this.push(chunk)
-
- callback()
- }))
- .pipe(fs.createWriteStream('out.txt'))
- .on('finish', () => doSomethingSpecial())
-```
-
-Or object streams:
-
-```js
-var all = []
-
-fs.createReadStream('data.csv')
- .pipe(csv2())
- .pipe(through2.obj(function (chunk, enc, callback) {
- var data = {
- name : chunk[0]
- , address : chunk[3]
- , phone : chunk[10]
- }
- this.push(data)
-
- callback()
- }))
- .on('data', (data) => {
- all.push(data)
- })
- .on('end', () => {
- doSomethingSpecial(all)
- })
-```
-
-Note that `through2.obj(fn)` is a convenience wrapper around `through2({ objectMode: true }, fn)`.
-
-## API
-
-through2([ options, ] [ transformFunction ] [, flushFunction ])
-
-Consult the **[stream.Transform](http://nodejs.org/docs/latest/api/stream.html#stream_class_stream_transform)** documentation for the exact rules of the `transformFunction` (i.e. `this._transform`) and the optional `flushFunction` (i.e. `this._flush`).
-
-### options
-
-The options argument is optional and is passed straight through to `stream.Transform`. So you can use `objectMode:true` if you are processing non-binary streams (or just use `through2.obj()`).
-
-The `options` argument is first, unlike standard convention, because if I'm passing in an anonymous function then I'd prefer for the options argument to not get lost at the end of the call:
-
-```js
-fs.createReadStream('/tmp/important.dat')
- .pipe(through2({ objectMode: true, allowHalfOpen: false },
- (chunk, enc, cb) => {
- cb(null, 'wut?') // note we can use the second argument on the callback
- // to provide data as an alternative to this.push('wut?')
- }
- )
- .pipe(fs.createWriteStream('/tmp/wut.txt'))
-```
-
-### transformFunction
-
-The `transformFunction` must have the following signature: `function (chunk, encoding, callback) {}`. A minimal implementation should call the `callback` function to indicate that the transformation is done, even if that transformation means discarding the chunk.
-
-To queue a new chunk, call `this.push(chunk)`—this can be called as many times as required before the `callback()` if you have multiple pieces to send on.
-
-Alternatively, you may use `callback(err, chunk)` as shorthand for emitting a single chunk or an error.
-
-If you **do not provide a `transformFunction`** then you will get a simple pass-through stream.
-
-### flushFunction
-
-The optional `flushFunction`, provided as the last argument (2nd or 3rd, depending on whether you've supplied options), is called just prior to the stream ending. It can be used to finish up any processing that may be in progress.
-
-```js
-fs.createReadStream('/tmp/important.dat')
- .pipe(through2(
- (chunk, enc, cb) => cb(null, chunk), // transform is a noop
- function (cb) { // flush function
- this.push('tacking on an extra buffer to the end');
- cb();
- }
- ))
- .pipe(fs.createWriteStream('/tmp/wut.txt'));
-```
-
-through2.ctor([ options, ] transformFunction[, flushFunction ])
-
-Instead of returning a `stream.Transform` instance, `through2.ctor()` returns a **constructor** for a custom Transform. This is useful when you want to use the same transform logic in multiple instances.
-
-```js
-var FToC = through2.ctor({objectMode: true}, function (record, encoding, callback) {
- if (record.temp != null && record.unit == "F") {
- record.temp = ( ( record.temp - 32 ) * 5 ) / 9
- record.unit = "C"
- }
- this.push(record)
- callback()
-})
-
-// Create instances of FToC like so:
-var converter = new FToC()
-// Or:
-var converter = FToC()
-// Or specify/override options when you instantiate, if you prefer:
-var converter = FToC({objectMode: true})
-```
-
-## See Also
-
- - [through2-map](https://github.com/brycebaril/through2-map) - Array.prototype.map analog for streams.
- - [through2-filter](https://github.com/brycebaril/through2-filter) - Array.prototype.filter analog for streams.
- - [through2-reduce](https://github.com/brycebaril/through2-reduce) - Array.prototype.reduce analog for streams.
- - [through2-spy](https://github.com/brycebaril/through2-spy) - Wrapper for simple stream.PassThrough spies.
- - the [mississippi stream utility collection](https://github.com/maxogden/mississippi) includes `through2` as well as many more useful stream modules similar to this one
-
-## License
-
-**through2** is Copyright (c) Rod Vagg [@rvagg](https://twitter.com/rvagg) and additional contributors and licensed under the MIT license. All rights not explicitly granted in the MIT license are reserved. See the included LICENSE file for more details.
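One case the README above mentions but does not illustrate is the pass-through stream you get when no `transformFunction` is supplied. A minimal sketch, assuming through2 is still installed; the file names are placeholders:

```js
var fs = require('fs')
var through2 = require('through2')

var tap = through2() // no transformFunction: every chunk is passed along unchanged
tap.on('data', function (chunk) {
  console.log('saw', chunk.length, 'bytes')
})

fs.createReadStream('ex.txt')
  .pipe(tap)
  .pipe(fs.createWriteStream('copy.txt'))
```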
diff --git a/node_modules/through2/node_modules/readable-stream/.travis.yml b/node_modules/through2/node_modules/readable-stream/.travis.yml
deleted file mode 100644
index f62cdac0686da613ecdbf214fb2b43a828cb6ce9..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/.travis.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-sudo: false
-language: node_js
-before_install:
- - (test $NPM_LEGACY && npm install -g npm@2 && npm install -g npm@3) || true
-notifications:
- email: false
-matrix:
- fast_finish: true
- include:
- - node_js: '0.8'
- env: NPM_LEGACY=true
- - node_js: '0.10'
- env: NPM_LEGACY=true
- - node_js: '0.11'
- env: NPM_LEGACY=true
- - node_js: '0.12'
- env: NPM_LEGACY=true
- - node_js: 1
- env: NPM_LEGACY=true
- - node_js: 2
- env: NPM_LEGACY=true
- - node_js: 3
- env: NPM_LEGACY=true
- - node_js: 4
- - node_js: 5
- - node_js: 6
- - node_js: 7
- - node_js: 8
- - node_js: 9
-script: "npm run test"
-env:
- global:
- - secure: rE2Vvo7vnjabYNULNyLFxOyt98BoJexDqsiOnfiD6kLYYsiQGfr/sbZkPMOFm9qfQG7pjqx+zZWZjGSswhTt+626C0t/njXqug7Yps4c3dFblzGfreQHp7wNX5TFsvrxd6dAowVasMp61sJcRnB2w8cUzoe3RAYUDHyiHktwqMc=
- - secure: g9YINaKAdMatsJ28G9jCGbSaguXCyxSTy+pBO6Ch0Cf57ZLOTka3HqDj8p3nV28LUIHZ3ut5WO43CeYKwt4AUtLpBS3a0dndHdY6D83uY6b2qh5hXlrcbeQTq2cvw2y95F7hm4D1kwrgZ7ViqaKggRcEupAL69YbJnxeUDKWEdI=
diff --git a/node_modules/through2/node_modules/readable-stream/CONTRIBUTING.md b/node_modules/through2/node_modules/readable-stream/CONTRIBUTING.md
deleted file mode 100644
index f478d58dca85b2c396e2da8a2251be0071c4e9e0..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/CONTRIBUTING.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-* (a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-* (b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-* (c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-* (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
-
-## Moderation Policy
-
-The [Node.js Moderation Policy] applies to this WG.
-
-## Code of Conduct
-
-The [Node.js Code of Conduct][] applies to this WG.
-
-[Node.js Code of Conduct]:
-https://github.com/nodejs/node/blob/master/CODE_OF_CONDUCT.md
-[Node.js Moderation Policy]:
-https://github.com/nodejs/TSC/blob/master/Moderation-Policy.md
diff --git a/node_modules/through2/node_modules/readable-stream/GOVERNANCE.md b/node_modules/through2/node_modules/readable-stream/GOVERNANCE.md
deleted file mode 100644
index 16ffb93f24bece9519cc4a220a0c1d3c91481453..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/GOVERNANCE.md
+++ /dev/null
@@ -1,136 +0,0 @@
-### Streams Working Group
-
-The Node.js Streams is jointly governed by a Working Group
-(WG)
-that is responsible for high-level guidance of the project.
-
-The WG has final authority over this project including:
-
-* Technical direction
-* Project governance and process (including this policy)
-* Contribution policy
-* GitHub repository hosting
-* Conduct guidelines
-* Maintaining the list of additional Collaborators
-
-For the current list of WG members, see the project
-[README.md](./README.md#current-project-team-members).
-
-### Collaborators
-
-The readable-stream GitHub repository is
-maintained by the WG and additional Collaborators who are added by the
-WG on an ongoing basis.
-
-Individuals making significant and valuable contributions are made
-Collaborators and given commit-access to the project. These
-individuals are identified by the WG and their addition as
-Collaborators is discussed during the WG meeting.
-
-_Note:_ If you make a significant contribution and are not considered
-for commit-access log an issue or contact a WG member directly and it
-will be brought up in the next WG meeting.
-
-Modifications of the contents of the readable-stream repository are
-made on
-a collaborative basis. Anybody with a GitHub account may propose a
-modification via pull request and it will be considered by the project
-Collaborators. All pull requests must be reviewed and accepted by a
-Collaborator with sufficient expertise who is able to take full
-responsibility for the change. In the case of pull requests proposed
-by an existing Collaborator, an additional Collaborator is required
-for sign-off. Consensus should be sought if additional Collaborators
-participate and there is disagreement around a particular
-modification. See _Consensus Seeking Process_ below for further detail
-on the consensus model used for governance.
-
-Collaborators may opt to elevate significant or controversial
-modifications, or modifications that have not found consensus to the
-WG for discussion by assigning the ***WG-agenda*** tag to a pull
-request or issue. The WG should serve as the final arbiter where
-required.
-
-For the current list of Collaborators, see the project
-[README.md](./README.md#members).
-
-### WG Membership
-
-WG seats are not time-limited. There is no fixed size of the WG.
-However, the expected target is between 6 and 12, to ensure adequate
-coverage of important areas of expertise, balanced with the ability to
-make decisions efficiently.
-
-There is no specific set of requirements or qualifications for WG
-membership beyond these rules.
-
-The WG may add additional members to the WG by unanimous consensus.
-
-A WG member may be removed from the WG by voluntary resignation, or by
-unanimous consensus of all other WG members.
-
-Changes to WG membership should be posted in the agenda, and may be
-suggested as any other agenda item (see "WG Meetings" below).
-
-If an addition or removal is proposed during a meeting, and the full
-WG is not in attendance to participate, then the addition or removal
-is added to the agenda for the subsequent meeting. This is to ensure
-that all members are given the opportunity to participate in all
-membership decisions. If a WG member is unable to attend a meeting
-where a planned membership decision is being made, then their consent
-is assumed.
-
-No more than 1/3 of the WG members may be affiliated with the same
-employer. If removal or resignation of a WG member, or a change of
-employment by a WG member, creates a situation where more than 1/3 of
-the WG membership shares an employer, then the situation must be
-immediately remedied by the resignation or removal of one or more WG
-members affiliated with the over-represented employer(s).
-
-### WG Meetings
-
-The WG meets occasionally on a Google Hangout On Air. A designated moderator
-approved by the WG runs the meeting. Each meeting should be
-published to YouTube.
-
-Items are added to the WG agenda that are considered contentious or
-are modifications of governance, contribution policy, WG membership,
-or release process.
-
-The intention of the agenda is not to approve or review all patches;
-that should happen continuously on GitHub and be handled by the larger
-group of Collaborators.
-
-Any community member or contributor can ask that something be added to
-the next meeting's agenda by logging a GitHub Issue. Any Collaborator,
-WG member or the moderator can add the item to the agenda by adding
-the ***WG-agenda*** tag to the issue.
-
-Prior to each WG meeting the moderator will share the Agenda with
-members of the WG. WG members can add any items they like to the
-agenda at the beginning of each meeting. The moderator and the WG
-cannot veto or remove items.
-
-The WG may invite persons or representatives from certain projects to
-participate in a non-voting capacity.
-
-The moderator is responsible for summarizing the discussion of each
-agenda item and sends it as a pull request after the meeting.
-
-### Consensus Seeking Process
-
-The WG follows a
-[Consensus
-Seeking](http://en.wikipedia.org/wiki/Consensus-seeking_decision-making)
-decision-making model.
-
-When an agenda item has appeared to reach a consensus the moderator
-will ask "Does anyone object?" as a final call for dissent from the
-consensus.
-
-If an agenda item cannot reach a consensus a WG member can call for
-either a closing vote or a vote to table the issue to the next
-meeting. The call for a vote must be seconded by a majority of the WG
-or else the discussion will continue. Simple majority wins.
-
-Note that changes to WG membership require a majority consensus. See
-"WG Membership" above.
diff --git a/node_modules/through2/node_modules/readable-stream/LICENSE b/node_modules/through2/node_modules/readable-stream/LICENSE
deleted file mode 100644
index 2873b3b2e595072e66330369d83e8af46655970c..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/LICENSE
+++ /dev/null
@@ -1,47 +0,0 @@
-Node.js is licensed for use as follows:
-
-"""
-Copyright Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
-This license applies to parts of Node.js originating from the
-https://github.com/joyent/node repository:
-
-"""
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
diff --git a/node_modules/through2/node_modules/readable-stream/README.md b/node_modules/through2/node_modules/readable-stream/README.md
deleted file mode 100644
index 23fe3f3e3009a2c63c3791738299504d40ebbca9..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# readable-stream
-
-***Node-core v8.11.1 streams for userland*** [![Build Status](https://travis-ci.org/nodejs/readable-stream.svg?branch=master)](https://travis-ci.org/nodejs/readable-stream)
-
-
-[![NPM](https://nodei.co/npm/readable-stream.png?downloads=true&downloadRank=true)](https://nodei.co/npm/readable-stream/)
-[![NPM](https://nodei.co/npm-dl/readable-stream.png?&months=6&height=3)](https://nodei.co/npm/readable-stream/)
-
-
-[![Sauce Test Status](https://saucelabs.com/browser-matrix/readable-stream.svg)](https://saucelabs.com/u/readable-stream)
-
-```bash
-npm install --save readable-stream
-```
-
-***Node-core streams for userland***
-
-This package is a mirror of the Streams2 and Streams3 implementations in
-Node-core.
-
-Full documentation may be found on the [Node.js website](https://nodejs.org/dist/v8.11.1/docs/api/stream.html).
-
-If you want to guarantee a stable streams base, regardless of what version of
-Node you, or the users of your libraries, are using, use **readable-stream** *only* and avoid the *"stream"* module in Node-core; for background, see [this blogpost](http://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html).
-
-As of version 2.0.0 **readable-stream** uses semantic versioning.
-
-# Streams Working Group
-
-`readable-stream` is maintained by the Streams Working Group, which
-oversees the development and maintenance of the Streams API within
-Node.js. The responsibilities of the Streams Working Group include:
-
-* Addressing stream issues on the Node.js issue tracker.
-* Authoring and editing stream documentation within the Node.js project.
-* Reviewing changes to stream subclasses within the Node.js project.
-* Redirecting changes to streams from the Node.js project to this
- project.
-* Assisting in the implementation of stream providers within Node.js.
-* Recommending versions of `readable-stream` to be included in Node.js.
-* Messaging about the future of streams to give the community advance
- notice of changes.
-
-
-## Team Members
-
-* **Chris Dickinson** ([@chrisdickinson](https://github.com/chrisdickinson)) <christopher.s.dickinson@gmail.com>
- - Release GPG key: 9554F04D7259F04124DE6B476D5A82AC7E37093B
-* **Calvin Metcalf** ([@calvinmetcalf](https://github.com/calvinmetcalf)) <calvin.metcalf@gmail.com>
- - Release GPG key: F3EF5F62A87FC27A22E643F714CE4FF5015AA242
-* **Rod Vagg** ([@rvagg](https://github.com/rvagg)) <rod@vagg.org>
- - Release GPG key: DD8F2338BAE7501E3DD5AC78C273792F7D83545D
-* **Sam Newman** ([@sonewman](https://github.com/sonewman)) <newmansam@outlook.com>
-* **Mathias Buus** ([@mafintosh](https://github.com/mafintosh)) <mathiasbuus@gmail.com>
-* **Domenic Denicola** ([@domenic](https://github.com/domenic)) <d@domenic.me>
-* **Matteo Collina** ([@mcollina](https://github.com/mcollina)) <matteo.collina@gmail.com>
- - Release GPG key: 3ABC01543F22DD2239285CDD818674489FBC127E
-* **Irina Shestak** ([@lrlna](https://github.com/lrlna)) <shestak.irina@gmail.com>
diff --git a/node_modules/through2/node_modules/readable-stream/doc/wg-meetings/2015-01-30.md b/node_modules/through2/node_modules/readable-stream/doc/wg-meetings/2015-01-30.md
deleted file mode 100644
index 83275f192e4077d32942525aaf510fa449a7c417..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/doc/wg-meetings/2015-01-30.md
+++ /dev/null
@@ -1,60 +0,0 @@
-# streams WG Meeting 2015-01-30
-
-## Links
-
-* **Google Hangouts Video**: http://www.youtube.com/watch?v=I9nDOSGfwZg
-* **GitHub Issue**: https://github.com/iojs/readable-stream/issues/106
-* **Original Minutes Google Doc**: https://docs.google.com/document/d/17aTgLnjMXIrfjgNaTUnHQO7m3xgzHR2VXBTmi03Qii4/
-
-## Agenda
-
-Extracted from https://github.com/iojs/readable-stream/labels/wg-agenda prior to meeting.
-
-* adopt a charter [#105](https://github.com/iojs/readable-stream/issues/105)
-* release and versioning strategy [#101](https://github.com/iojs/readable-stream/issues/101)
-* simpler stream creation [#102](https://github.com/iojs/readable-stream/issues/102)
-* proposal: deprecate implicit flowing of streams [#99](https://github.com/iojs/readable-stream/issues/99)
-
-## Minutes
-
-### adopt a charter
-
-* group: +1's all around
-
-### What versioning scheme should be adopted?
-* group: +1’s 3.0.0
-* domenic+group: pulling in patches from other sources where appropriate
-* mikeal: version independently, suggesting versions for io.js
-* mikeal+domenic: work with TC to notify in advance of changes
-simpler stream creation
-
-### streamline creation of streams
-* sam: streamline creation of streams
-* domenic: nice simple solution posted
- but, we lose the opportunity to change the model
- may not be backwards incompatible (double check keys)
-
- **action item:** domenic will check
-
-### remove implicit flowing of streams on(‘data’)
-* add isFlowing / isPaused
-* mikeal: worrying that we’re documenting polyfill methods – confuses users
-* domenic: more reflective API is probably good, with warning labels for users
-* new section for mad scientists (reflective stream access)
-* calvin: name the “third state”
-* mikeal: maybe borrow the name from whatwg?
-* domenic: we’re missing the “third state”
-* consensus: kind of difficult to name the third state
-* mikeal: figure out differences in states / compat
-* mathias: always flow on data – eliminates third state
- * explore what it breaks
-
-**action items:**
-* ask isaac for ability to list packages by what public io.js APIs they use (esp. Stream)
-* ask rod/build for infrastructure
-* **chris**: explore the “flow on data” approach
-* add isPaused/isFlowing
-* add new docs section
-* move isPaused to that section
-
-
diff --git a/node_modules/through2/node_modules/readable-stream/duplex-browser.js b/node_modules/through2/node_modules/readable-stream/duplex-browser.js
deleted file mode 100644
index f8b2db83dbe733d7720264a9840202e29ebeffbd..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/duplex-browser.js
+++ /dev/null
@@ -1 +0,0 @@
-module.exports = require('./lib/_stream_duplex.js');
diff --git a/node_modules/through2/node_modules/readable-stream/duplex.js b/node_modules/through2/node_modules/readable-stream/duplex.js
deleted file mode 100644
index 46924cbfdf53871b574d3f6f5b4bc6064b824aaa..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/duplex.js
+++ /dev/null
@@ -1 +0,0 @@
-module.exports = require('./readable').Duplex
diff --git a/node_modules/through2/node_modules/readable-stream/lib/_stream_duplex.js b/node_modules/through2/node_modules/readable-stream/lib/_stream_duplex.js
deleted file mode 100644
index 57003c32d256c0a1fe20dadd279abef2d463074f..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/_stream_duplex.js
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright Joyent, Inc. and other Node contributors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-// USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// a duplex stream is just a stream that is both readable and writable.
-// Since JS doesn't have multiple prototypal inheritance, this class
-// prototypally inherits from Readable, and then parasitically from
-// Writable.
-
-'use strict';
-
-/**/
-
-var pna = require('process-nextick-args');
-/**/
-
-/**/
-var objectKeys = Object.keys || function (obj) {
- var keys = [];
- for (var key in obj) {
- keys.push(key);
- }return keys;
-};
-/**/
-
-module.exports = Duplex;
-
-/**/
-var util = Object.create(require('core-util-is'));
-util.inherits = require('inherits');
-/**/
-
-var Readable = require('./_stream_readable');
-var Writable = require('./_stream_writable');
-
-util.inherits(Duplex, Readable);
-
-{
- // avoid scope creep, the keys array can then be collected
- var keys = objectKeys(Writable.prototype);
- for (var v = 0; v < keys.length; v++) {
- var method = keys[v];
- if (!Duplex.prototype[method]) Duplex.prototype[method] = Writable.prototype[method];
- }
-}
-
-function Duplex(options) {
- if (!(this instanceof Duplex)) return new Duplex(options);
-
- Readable.call(this, options);
- Writable.call(this, options);
-
- if (options && options.readable === false) this.readable = false;
-
- if (options && options.writable === false) this.writable = false;
-
- this.allowHalfOpen = true;
- if (options && options.allowHalfOpen === false) this.allowHalfOpen = false;
-
- this.once('end', onend);
-}
-
-Object.defineProperty(Duplex.prototype, 'writableHighWaterMark', {
- // making it explicit this property is not enumerable
- // because otherwise some prototype manipulation in
- // userland will fail
- enumerable: false,
- get: function () {
- return this._writableState.highWaterMark;
- }
-});
-
-// the no-half-open enforcer
-function onend() {
- // if we allow half-open state, or if the writable side ended,
- // then we're ok.
- if (this.allowHalfOpen || this._writableState.ended) return;
-
- // no more data can be written.
- // But allow more writes to happen in this tick.
- pna.nextTick(onEndNT, this);
-}
-
-function onEndNT(self) {
- self.end();
-}
-
-Object.defineProperty(Duplex.prototype, 'destroyed', {
- get: function () {
- if (this._readableState === undefined || this._writableState === undefined) {
- return false;
- }
- return this._readableState.destroyed && this._writableState.destroyed;
- },
- set: function (value) {
- // we ignore the value if the stream
- // has not been initialized yet
- if (this._readableState === undefined || this._writableState === undefined) {
- return;
- }
-
- // backward compatibility, the user is explicitly
- // managing destroyed
- this._readableState.destroyed = value;
- this._writableState.destroyed = value;
- }
-});
-
-Duplex.prototype._destroy = function (err, cb) {
- this.push(null);
- this.end();
-
- pna.nextTick(cb, err);
-};
\ No newline at end of file
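The part of this file users most often notice is the "no-half-open enforcer": with `allowHalfOpen: false`, once the readable side emits 'end' the writable side is ended automatically on the next tick. A small sketch of that behaviour using Node's built-in `stream.Duplex`, which the deleted vendored copy tracks; the no-op `read`/`write` implementations are made up for the example:

```js
var Duplex = require('stream').Duplex

var d = new Duplex({
  allowHalfOpen: false,                       // opt in to the enforcer shown above
  read: function () {},                       // no-op: data is pushed manually below
  write: function (chunk, enc, cb) { cb() }   // no-op sink
})

d.on('finish', function () {
  console.log('writable side was ended automatically')
})

d.push(null) // end the readable side...
d.resume()   // ...let 'end' fire; the enforcer then calls d.end() on the next tick
```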
diff --git a/node_modules/through2/node_modules/readable-stream/lib/_stream_passthrough.js b/node_modules/through2/node_modules/readable-stream/lib/_stream_passthrough.js
deleted file mode 100644
index 612edb4d8b443fabc4ddac619da420bad62fc5b0..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/_stream_passthrough.js
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright Joyent, Inc. and other Node contributors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-// USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// a passthrough stream.
-// basically just the most minimal sort of Transform stream.
-// Every written chunk gets output as-is.
-
-'use strict';
-
-module.exports = PassThrough;
-
-var Transform = require('./_stream_transform');
-
-/**/
-var util = Object.create(require('core-util-is'));
-util.inherits = require('inherits');
-/**/
-
-util.inherits(PassThrough, Transform);
-
-function PassThrough(options) {
- if (!(this instanceof PassThrough)) return new PassThrough(options);
-
- Transform.call(this, options);
-}
-
-PassThrough.prototype._transform = function (chunk, encoding, cb) {
- cb(null, chunk);
-};
\ No newline at end of file
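`PassThrough` adds no transformation of its own, so it is mostly useful as a junction point, for example to meter bytes flowing between two other streams. A small sketch using Node's built-in `stream.PassThrough`, which this deleted file mirrors; the file names are placeholders:

```js
var fs = require('fs')
var zlib = require('zlib')
var PassThrough = require('stream').PassThrough

var meter = new PassThrough()
var total = 0
meter.on('data', function (chunk) { total += chunk.length })
meter.on('end', function () { console.log('compressed size:', total, 'bytes') })

fs.createReadStream('ex.txt')
  .pipe(zlib.createGzip())
  .pipe(meter)                            // chunks pass through unchanged
  .pipe(fs.createWriteStream('ex.txt.gz'))
```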
diff --git a/node_modules/through2/node_modules/readable-stream/lib/_stream_readable.js b/node_modules/through2/node_modules/readable-stream/lib/_stream_readable.js
deleted file mode 100644
index 0f807646b0f67d8ab98c46ca516478c2684b70b1..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/_stream_readable.js
+++ /dev/null
@@ -1,1019 +0,0 @@
-// Copyright Joyent, Inc. and other Node contributors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-// USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-'use strict';
-
-/**/
-
-var pna = require('process-nextick-args');
-/**/
-
-module.exports = Readable;
-
-/**/
-var isArray = require('isarray');
-/**/
-
-/**/
-var Duplex;
-/**/
-
-Readable.ReadableState = ReadableState;
-
-/**/
-var EE = require('events').EventEmitter;
-
-var EElistenerCount = function (emitter, type) {
- return emitter.listeners(type).length;
-};
-/**/
-
-/**/
-var Stream = require('./internal/streams/stream');
-/**/
-
-/**/
-
-var Buffer = require('safe-buffer').Buffer;
-var OurUint8Array = global.Uint8Array || function () {};
-function _uint8ArrayToBuffer(chunk) {
- return Buffer.from(chunk);
-}
-function _isUint8Array(obj) {
- return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
-}
-
-/**/
-
-/**/
-var util = Object.create(require('core-util-is'));
-util.inherits = require('inherits');
-/**/
-
-/**/
-var debugUtil = require('util');
-var debug = void 0;
-if (debugUtil && debugUtil.debuglog) {
- debug = debugUtil.debuglog('stream');
-} else {
- debug = function () {};
-}
-/**/
-
-var BufferList = require('./internal/streams/BufferList');
-var destroyImpl = require('./internal/streams/destroy');
-var StringDecoder;
-
-util.inherits(Readable, Stream);
-
-var kProxyEvents = ['error', 'close', 'destroy', 'pause', 'resume'];
-
-function prependListener(emitter, event, fn) {
- // Sadly this is not cacheable as some libraries bundle their own
- // event emitter implementation with them.
- if (typeof emitter.prependListener === 'function') return emitter.prependListener(event, fn);
-
- // This is a hack to make sure that our error handler is attached before any
- // userland ones. NEVER DO THIS. This is here only because this code needs
- // to continue to work with older versions of Node.js that do not include
- // the prependListener() method. The goal is to eventually remove this hack.
- if (!emitter._events || !emitter._events[event]) emitter.on(event, fn);else if (isArray(emitter._events[event])) emitter._events[event].unshift(fn);else emitter._events[event] = [fn, emitter._events[event]];
-}
-
-function ReadableState(options, stream) {
- Duplex = Duplex || require('./_stream_duplex');
-
- options = options || {};
-
- // Duplex streams are both readable and writable, but share
- // the same options object.
- // However, some cases require setting options to different
- // values for the readable and the writable sides of the duplex stream.
- // These options can be provided separately as readableXXX and writableXXX.
- var isDuplex = stream instanceof Duplex;
-
- // object stream flag. Used to make read(n) ignore n and to
- // make all the buffer merging and length checks go away
- this.objectMode = !!options.objectMode;
-
- if (isDuplex) this.objectMode = this.objectMode || !!options.readableObjectMode;
-
- // the point at which it stops calling _read() to fill the buffer
- // Note: 0 is a valid value, means "don't call _read preemptively ever"
- var hwm = options.highWaterMark;
- var readableHwm = options.readableHighWaterMark;
- var defaultHwm = this.objectMode ? 16 : 16 * 1024;
-
- if (hwm || hwm === 0) this.highWaterMark = hwm;else if (isDuplex && (readableHwm || readableHwm === 0)) this.highWaterMark = readableHwm;else this.highWaterMark = defaultHwm;
-
- // cast to ints.
- this.highWaterMark = Math.floor(this.highWaterMark);
-
- // A linked list is used to store data chunks instead of an array because the
- // linked list can remove elements from the beginning faster than
- // array.shift()
- this.buffer = new BufferList();
- this.length = 0;
- this.pipes = null;
- this.pipesCount = 0;
- this.flowing = null;
- this.ended = false;
- this.endEmitted = false;
- this.reading = false;
-
- // a flag to be able to tell if the event 'readable'/'data' is emitted
- // immediately, or on a later tick. We set this to true at first, because
- // any actions that shouldn't happen until "later" should generally also
- // not happen before the first read call.
- this.sync = true;
-
- // whenever we return null, then we set a flag to say
- // that we're awaiting a 'readable' event emission.
- this.needReadable = false;
- this.emittedReadable = false;
- this.readableListening = false;
- this.resumeScheduled = false;
-
- // has it been destroyed
- this.destroyed = false;
-
- // Crypto is kind of old and crusty. Historically, its default string
- // encoding is 'binary' so we have to make this configurable.
- // Everything else in the universe uses 'utf8', though.
- this.defaultEncoding = options.defaultEncoding || 'utf8';
-
- // the number of writers that are awaiting a drain event in .pipe()s
- this.awaitDrain = 0;
-
- // if true, a maybeReadMore has been scheduled
- this.readingMore = false;
-
- this.decoder = null;
- this.encoding = null;
- if (options.encoding) {
- if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
- this.decoder = new StringDecoder(options.encoding);
- this.encoding = options.encoding;
- }
-}
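// Illustrative sketch (assumes the readable-stream package is require()-able):
// how the highWaterMark defaults computed above surface to callers — 16
// objects in objectMode, 16 * 1024 bytes otherwise, with readableHighWaterMark
// consulted only for Duplex streams. The stream instances are hypothetical.
var Readable = require('readable-stream').Readable;
var Duplex = require('readable-stream').Duplex;

var bytes = new Readable({ read: function () {} });
console.log(bytes.readableHighWaterMark);   // 16384 (default byte hwm)

var objects = new Readable({ objectMode: true, read: function () {} });
console.log(objects.readableHighWaterMark); // 16 (default object-mode hwm)

var dup = new Duplex({
  read: function () {},
  write: function (chunk, enc, cb) { cb(); },
  readableHighWaterMark: 8,                 // applies to the readable side only
  writableHighWaterMark: 1024               // applies to the writable side only
});
console.log(dup.readableHighWaterMark);     // 8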
-
-function Readable(options) {
- Duplex = Duplex || require('./_stream_duplex');
-
- if (!(this instanceof Readable)) return new Readable(options);
-
- this._readableState = new ReadableState(options, this);
-
- // legacy
- this.readable = true;
-
- if (options) {
- if (typeof options.read === 'function') this._read = options.read;
-
- if (typeof options.destroy === 'function') this._destroy = options.destroy;
- }
-
- Stream.call(this);
-}
-
-Object.defineProperty(Readable.prototype, 'destroyed', {
- get: function () {
- if (this._readableState === undefined) {
- return false;
- }
- return this._readableState.destroyed;
- },
- set: function (value) {
- // we ignore the value if the stream
- // has not been initialized yet
- if (!this._readableState) {
- return;
- }
-
- // backward compatibility, the user is explicitly
- // managing destroyed
- this._readableState.destroyed = value;
- }
-});
-
-Readable.prototype.destroy = destroyImpl.destroy;
-Readable.prototype._undestroy = destroyImpl.undestroy;
-Readable.prototype._destroy = function (err, cb) {
- this.push(null);
- cb(err);
-};
-
-// Manually shove something into the read() buffer.
-// This returns true if the highWaterMark has not been hit yet,
-// similar to how Writable.write() returns true if you should
-// write() some more.
-Readable.prototype.push = function (chunk, encoding) {
- var state = this._readableState;
- var skipChunkCheck;
-
- if (!state.objectMode) {
- if (typeof chunk === 'string') {
- encoding = encoding || state.defaultEncoding;
- if (encoding !== state.encoding) {
- chunk = Buffer.from(chunk, encoding);
- encoding = '';
- }
- skipChunkCheck = true;
- }
- } else {
- skipChunkCheck = true;
- }
-
- return readableAddChunk(this, chunk, encoding, false, skipChunkCheck);
-};
-
-// Unshift should *always* be something directly out of read()
-Readable.prototype.unshift = function (chunk) {
- return readableAddChunk(this, chunk, null, true, false);
-};
-
-function readableAddChunk(stream, chunk, encoding, addToFront, skipChunkCheck) {
- var state = stream._readableState;
- if (chunk === null) {
- state.reading = false;
- onEofChunk(stream, state);
- } else {
- var er;
- if (!skipChunkCheck) er = chunkInvalid(state, chunk);
- if (er) {
- stream.emit('error', er);
- } else if (state.objectMode || chunk && chunk.length > 0) {
- if (typeof chunk !== 'string' && !state.objectMode && Object.getPrototypeOf(chunk) !== Buffer.prototype) {
- chunk = _uint8ArrayToBuffer(chunk);
- }
-
- if (addToFront) {
- if (state.endEmitted) stream.emit('error', new Error('stream.unshift() after end event'));else addChunk(stream, state, chunk, true);
- } else if (state.ended) {
- stream.emit('error', new Error('stream.push() after EOF'));
- } else {
- state.reading = false;
- if (state.decoder && !encoding) {
- chunk = state.decoder.write(chunk);
- if (state.objectMode || chunk.length !== 0) addChunk(stream, state, chunk, false);else maybeReadMore(stream, state);
- } else {
- addChunk(stream, state, chunk, false);
- }
- }
- } else if (!addToFront) {
- state.reading = false;
- }
- }
-
- return needMoreData(state);
-}
-
-function addChunk(stream, state, chunk, addToFront) {
- if (state.flowing && state.length === 0 && !state.sync) {
- stream.emit('data', chunk);
- stream.read(0);
- } else {
- // update the buffer info.
- state.length += state.objectMode ? 1 : chunk.length;
- if (addToFront) state.buffer.unshift(chunk);else state.buffer.push(chunk);
-
- if (state.needReadable) emitReadable(stream);
- }
- maybeReadMore(stream, state);
-}
-
-function chunkInvalid(state, chunk) {
- var er;
- if (!_isUint8Array(chunk) && typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {
- er = new TypeError('Invalid non-string/buffer chunk');
- }
- return er;
-}
-
-// if it's past the high water mark, we can push in some more.
-// Also, if we have no data yet, we can stand some
-// more bytes. This is to work around cases where hwm=0,
-// such as the repl. Also, if the push() triggered a
-// readable event, and the user called read(largeNumber) such that
-// needReadable was set, then we ought to push more, so that another
-// 'readable' event will be triggered.
-function needMoreData(state) {
- return !state.ended && (state.needReadable || state.length < state.highWaterMark || state.length === 0);
-}
-
-Readable.prototype.isPaused = function () {
- return this._readableState.flowing === false;
-};
-
-// backwards compatibility.
-Readable.prototype.setEncoding = function (enc) {
- if (!StringDecoder) StringDecoder = require('string_decoder/').StringDecoder;
- this._readableState.decoder = new StringDecoder(enc);
- this._readableState.encoding = enc;
- return this;
-};
-
-// Don't raise the hwm > 8MB
-var MAX_HWM = 0x800000;
-function computeNewHighWaterMark(n) {
- if (n >= MAX_HWM) {
- n = MAX_HWM;
- } else {
- // Get the next highest power of 2 to prevent increasing hwm excessively in
- // tiny amounts
- n--;
- n |= n >>> 1;
- n |= n >>> 2;
- n |= n >>> 4;
- n |= n >>> 8;
- n |= n >>> 16;
- n++;
- }
- return n;
-}
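// Illustrative sketch: the power-of-two rounding performed by
// computeNewHighWaterMark() above, restated as a standalone helper so the
// effect of read(n) with n > highWaterMark is easy to see. Not part of the
// module itself.
function nextPowerOfTwo(n) {
  n--;
  n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
  return n + 1;
}
console.log(nextPowerOfTwo(16 * 1024 + 1)); // 32768
console.log(nextPowerOfTwo(100000));        // 131072
console.log(nextPowerOfTwo(65536));         // 65536 (already a power of two)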
-
-// This function is designed to be inlinable, so please take care when making
-// changes to the function body.
-function howMuchToRead(n, state) {
- if (n <= 0 || state.length === 0 && state.ended) return 0;
- if (state.objectMode) return 1;
- if (n !== n) {
- // Only flow one buffer at a time
- if (state.flowing && state.length) return state.buffer.head.data.length;else return state.length;
- }
- // If we're asking for more than the current hwm, then raise the hwm.
- if (n > state.highWaterMark) state.highWaterMark = computeNewHighWaterMark(n);
- if (n <= state.length) return n;
- // Don't have enough
- if (!state.ended) {
- state.needReadable = true;
- return 0;
- }
- return state.length;
-}
-
-// you can override either this method, or the async _read(n) below.
-Readable.prototype.read = function (n) {
- debug('read', n);
- n = parseInt(n, 10);
- var state = this._readableState;
- var nOrig = n;
-
- if (n !== 0) state.emittedReadable = false;
-
- // if we're doing read(0) to trigger a readable event, but we
- // already have a bunch of data in the buffer, then just trigger
- // the 'readable' event and move on.
- if (n === 0 && state.needReadable && (state.length >= state.highWaterMark || state.ended)) {
- debug('read: emitReadable', state.length, state.ended);
- if (state.length === 0 && state.ended) endReadable(this);else emitReadable(this);
- return null;
- }
-
- n = howMuchToRead(n, state);
-
- // if we've ended, and we're now clear, then finish it up.
- if (n === 0 && state.ended) {
- if (state.length === 0) endReadable(this);
- return null;
- }
-
- // All the actual chunk generation logic needs to be
- // *below* the call to _read. The reason is that in certain
- // synthetic stream cases, such as passthrough streams, _read
- // may be a completely synchronous operation which may change
- // the state of the read buffer, providing enough data when
- // before there was *not* enough.
- //
- // So, the steps are:
- // 1. Figure out what the state of things will be after we do
- // a read from the buffer.
- //
- // 2. If that resulting state will trigger a _read, then call _read.
- // Note that this may be asynchronous, or synchronous. Yes, it is
- // deeply ugly to write APIs this way, but that still doesn't mean
- // that the Readable class should behave improperly, as streams are
- // designed to be sync/async agnostic.
- // Take note if the _read call is sync or async (ie, if the read call
- // has returned yet), so that we know whether or not it's safe to emit
- // 'readable' etc.
- //
- // 3. Actually pull the requested chunks out of the buffer and return.
-
- // if we need a readable event, then we need to do some reading.
- var doRead = state.needReadable;
- debug('need readable', doRead);
-
- // if we currently have less than the highWaterMark, then also read some
- if (state.length === 0 || state.length - n < state.highWaterMark) {
- doRead = true;
- debug('length less than watermark', doRead);
- }
-
- // however, if we've ended, then there's no point, and if we're already
- // reading, then it's unnecessary.
- if (state.ended || state.reading) {
- doRead = false;
- debug('reading or ended', doRead);
- } else if (doRead) {
- debug('do read');
- state.reading = true;
- state.sync = true;
- // if the length is currently zero, then we *need* a readable event.
- if (state.length === 0) state.needReadable = true;
- // call internal read method
- this._read(state.highWaterMark);
- state.sync = false;
- // If _read pushed data synchronously, then `reading` will be false,
- // and we need to re-evaluate how much data we can return to the user.
- if (!state.reading) n = howMuchToRead(nOrig, state);
- }
-
- var ret;
- if (n > 0) ret = fromList(n, state);else ret = null;
-
- if (ret === null) {
- state.needReadable = true;
- n = 0;
- } else {
- state.length -= n;
- }
-
- if (state.length === 0) {
- // If we have nothing in the buffer, then we want to know
- // as soon as we *do* get something into the buffer.
- if (!state.ended) state.needReadable = true;
-
- // If we tried to read() past the EOF, then emit end on the next tick.
- if (nOrig !== n && state.ended) endReadable(this);
- }
-
- if (ret !== null) this.emit('data', ret);
-
- return ret;
-};
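// Illustrative sketch (assumes readable-stream is require()-able): the usual
// pull-style consumer for the read() method above — wait for 'readable', then
// drain the buffer until read() returns null. The `source` stream is a
// hypothetical example, not part of this file.
var Readable = require('readable-stream').Readable;

var source = new Readable({ read: function () {} });
source.push('hello ');
source.push('world\n');
source.push(null);

source.on('readable', function () {
  var chunk;
  while ((chunk = source.read()) !== null) {
    process.stdout.write(chunk);
  }
});
source.on('end', function () {
  console.log('(end)');
});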
-
-function onEofChunk(stream, state) {
- if (state.ended) return;
- if (state.decoder) {
- var chunk = state.decoder.end();
- if (chunk && chunk.length) {
- state.buffer.push(chunk);
- state.length += state.objectMode ? 1 : chunk.length;
- }
- }
- state.ended = true;
-
- // emit 'readable' now to make sure it gets picked up.
- emitReadable(stream);
-}
-
-// Don't emit readable right away in sync mode, because this can trigger
-// another read() call => stack overflow. This way, it might trigger
-// a nextTick recursion warning, but that's not so bad.
-function emitReadable(stream) {
- var state = stream._readableState;
- state.needReadable = false;
- if (!state.emittedReadable) {
- debug('emitReadable', state.flowing);
- state.emittedReadable = true;
- if (state.sync) pna.nextTick(emitReadable_, stream);else emitReadable_(stream);
- }
-}
-
-function emitReadable_(stream) {
- debug('emit readable');
- stream.emit('readable');
- flow(stream);
-}
-
-// at this point, the user has presumably seen the 'readable' event,
-// and called read() to consume some data. that may have triggered
-// in turn another _read(n) call, in which case reading = true if
-// it's in progress.
-// However, if we're not ended, or reading, and the length < hwm,
-// then go ahead and try to read some more preemptively.
-function maybeReadMore(stream, state) {
- if (!state.readingMore) {
- state.readingMore = true;
- pna.nextTick(maybeReadMore_, stream, state);
- }
-}
-
-function maybeReadMore_(stream, state) {
- var len = state.length;
- while (!state.reading && !state.flowing && !state.ended && state.length < state.highWaterMark) {
- debug('maybeReadMore read 0');
- stream.read(0);
- if (len === state.length)
- // didn't get any data, stop spinning.
- break;else len = state.length;
- }
- state.readingMore = false;
-}
-
-// abstract method. to be overridden in specific implementation classes.
-// call cb(er, data) where data is <= n in length.
-// for virtual (non-string, non-buffer) streams, "length" is somewhat
-// arbitrary, and perhaps not very meaningful.
-Readable.prototype._read = function (n) {
- this.emit('error', new Error('_read() is not implemented'));
-};
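// Illustrative sketch: a minimal _read() override for the abstract method
// above. push() returns false once the internal buffer reaches highWaterMark,
// after which _read() is not called again until the buffer drains. `Counter`
// is a hypothetical example class.
var Readable = require('readable-stream').Readable;
var util = require('util');

function Counter(options) {
  Readable.call(this, options);
  this._n = 0;
}
util.inherits(Counter, Readable);

Counter.prototype._read = function (size) {
  while (this._n < 1000) {
    var keepGoing = this.push(String(this._n++) + '\n');
    if (!keepGoing) return;   // backpressure: wait for the next _read() call
  }
  this.push(null);            // signal EOF
};

new Counter().pipe(process.stdout);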
-
-Readable.prototype.pipe = function (dest, pipeOpts) {
- var src = this;
- var state = this._readableState;
-
- switch (state.pipesCount) {
- case 0:
- state.pipes = dest;
- break;
- case 1:
- state.pipes = [state.pipes, dest];
- break;
- default:
- state.pipes.push(dest);
- break;
- }
- state.pipesCount += 1;
- debug('pipe count=%d opts=%j', state.pipesCount, pipeOpts);
-
- var doEnd = (!pipeOpts || pipeOpts.end !== false) && dest !== process.stdout && dest !== process.stderr;
-
- var endFn = doEnd ? onend : unpipe;
- if (state.endEmitted) pna.nextTick(endFn);else src.once('end', endFn);
-
- dest.on('unpipe', onunpipe);
- function onunpipe(readable, unpipeInfo) {
- debug('onunpipe');
- if (readable === src) {
- if (unpipeInfo && unpipeInfo.hasUnpiped === false) {
- unpipeInfo.hasUnpiped = true;
- cleanup();
- }
- }
- }
-
- function onend() {
- debug('onend');
- dest.end();
- }
-
- // when the dest drains, it reduces the awaitDrain counter
- // on the source. This would be more elegant with a .once()
- // handler in flow(), but adding and removing repeatedly is
- // too slow.
- var ondrain = pipeOnDrain(src);
- dest.on('drain', ondrain);
-
- var cleanedUp = false;
- function cleanup() {
- debug('cleanup');
- // cleanup event handlers once the pipe is broken
- dest.removeListener('close', onclose);
- dest.removeListener('finish', onfinish);
- dest.removeListener('drain', ondrain);
- dest.removeListener('error', onerror);
- dest.removeListener('unpipe', onunpipe);
- src.removeListener('end', onend);
- src.removeListener('end', unpipe);
- src.removeListener('data', ondata);
-
- cleanedUp = true;
-
- // if the reader is waiting for a drain event from this
- // specific writer, removing that 'drain' listener would leave
- // the reader paused and it would never start flowing again.
- // So, if this is awaiting a drain, then we just call it now.
- // If we don't know, then assume that we are waiting for one.
- if (state.awaitDrain && (!dest._writableState || dest._writableState.needDrain)) ondrain();
- }
-
- // If the user pushes more data while we're writing to dest then we'll end up
- // in ondata again. However, we only want to increase awaitDrain once because
- // dest will only emit one 'drain' event for the multiple writes.
- // => Introduce a guard on increasing awaitDrain.
- var increasedAwaitDrain = false;
- src.on('data', ondata);
- function ondata(chunk) {
- debug('ondata');
- increasedAwaitDrain = false;
- var ret = dest.write(chunk);
- if (false === ret && !increasedAwaitDrain) {
- // If the user unpiped during `dest.write()`, it is possible
- // to get stuck in a permanently paused state if that write
- // also returned false.
- // => Check whether `dest` is still a piping destination.
- if ((state.pipesCount === 1 && state.pipes === dest || state.pipesCount > 1 && indexOf(state.pipes, dest) !== -1) && !cleanedUp) {
- debug('false write response, pause', src._readableState.awaitDrain);
- src._readableState.awaitDrain++;
- increasedAwaitDrain = true;
- }
- src.pause();
- }
- }
-
- // if the dest has an error, then stop piping into it.
- // however, don't suppress the throwing behavior for this.
- function onerror(er) {
- debug('onerror', er);
- unpipe();
- dest.removeListener('error', onerror);
- if (EElistenerCount(dest, 'error') === 0) dest.emit('error', er);
- }
-
- // Make sure our error handler is attached before userland ones.
- prependListener(dest, 'error', onerror);
-
- // Both close and finish should trigger unpipe, but only once.
- function onclose() {
- dest.removeListener('finish', onfinish);
- unpipe();
- }
- dest.once('close', onclose);
- function onfinish() {
- debug('onfinish');
- dest.removeListener('close', onclose);
- unpipe();
- }
- dest.once('finish', onfinish);
-
- function unpipe() {
- debug('unpipe');
- src.unpipe(dest);
- }
-
- // tell the dest that it's being piped to
- dest.emit('pipe', src);
-
- // start the flow if it hasn't been started already.
- if (!state.flowing) {
- debug('pipe resume');
- src.resume();
- }
-
- return dest;
-};
-
-function pipeOnDrain(src) {
- return function () {
- var state = src._readableState;
- debug('pipeOnDrain', state.awaitDrain);
- if (state.awaitDrain) state.awaitDrain--;
- if (state.awaitDrain === 0 && EElistenerCount(src, 'data')) {
- state.flowing = true;
- flow(src);
- }
- };
-}
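// Illustrative sketch: the backpressure path described above — dest.write()
// returning false pauses the source and bumps awaitDrain; the 'drain' handler
// installed by pipeOnDrain() restarts the flow. The slow sink below is a
// hypothetical example with a deliberately tiny highWaterMark.
var rs = require('readable-stream');

var slow = new rs.Writable({
  highWaterMark: 1,
  write: function (chunk, enc, cb) {
    setTimeout(cb, 10);       // pretend the underlying sink is slow
  }
});

var fast = new rs.Readable({ read: function () {} });
for (var i = 0; i < 100; i++) fast.push('chunk ' + i + '\n');
fast.push(null);

fast.pipe(slow);              // pausing and resuming happens automatically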
-
-Readable.prototype.unpipe = function (dest) {
- var state = this._readableState;
- var unpipeInfo = { hasUnpiped: false };
-
- // if we're not piping anywhere, then do nothing.
- if (state.pipesCount === 0) return this;
-
- // just one destination. most common case.
- if (state.pipesCount === 1) {
- // passed in one, but it's not the right one.
- if (dest && dest !== state.pipes) return this;
-
- if (!dest) dest = state.pipes;
-
- // got a match.
- state.pipes = null;
- state.pipesCount = 0;
- state.flowing = false;
- if (dest) dest.emit('unpipe', this, unpipeInfo);
- return this;
- }
-
- // slow case. multiple pipe destinations.
-
- if (!dest) {
- // remove all.
- var dests = state.pipes;
- var len = state.pipesCount;
- state.pipes = null;
- state.pipesCount = 0;
- state.flowing = false;
-
- for (var i = 0; i < len; i++) {
- dests[i].emit('unpipe', this, unpipeInfo);
- }return this;
- }
-
- // try to find the right one.
- var index = indexOf(state.pipes, dest);
- if (index === -1) return this;
-
- state.pipes.splice(index, 1);
- state.pipesCount -= 1;
- if (state.pipesCount === 1) state.pipes = state.pipes[0];
-
- dest.emit('unpipe', this, unpipeInfo);
-
- return this;
-};
-
-// set up data events if they are asked for
-// Ensure readable listeners eventually get something
-Readable.prototype.on = function (ev, fn) {
- var res = Stream.prototype.on.call(this, ev, fn);
-
- if (ev === 'data') {
- // Start flowing on next tick if stream isn't explicitly paused
- if (this._readableState.flowing !== false) this.resume();
- } else if (ev === 'readable') {
- var state = this._readableState;
- if (!state.endEmitted && !state.readableListening) {
- state.readableListening = state.needReadable = true;
- state.emittedReadable = false;
- if (!state.reading) {
- pna.nextTick(nReadingNextTick, this);
- } else if (state.length) {
- emitReadable(this);
- }
- }
- }
-
- return res;
-};
-Readable.prototype.addListener = Readable.prototype.on;
-
-function nReadingNextTick(self) {
- debug('readable nexttick read 0');
- self.read(0);
-}
-
-// pause() and resume() are remnants of the legacy readable stream API
-// If the user uses them, then switch into old mode.
-Readable.prototype.resume = function () {
- var state = this._readableState;
- if (!state.flowing) {
- debug('resume');
- state.flowing = true;
- resume(this, state);
- }
- return this;
-};
-
-function resume(stream, state) {
- if (!state.resumeScheduled) {
- state.resumeScheduled = true;
- pna.nextTick(resume_, stream, state);
- }
-}
-
-function resume_(stream, state) {
- if (!state.reading) {
- debug('resume read 0');
- stream.read(0);
- }
-
- state.resumeScheduled = false;
- state.awaitDrain = 0;
- stream.emit('resume');
- flow(stream);
- if (state.flowing && !state.reading) stream.read(0);
-}
-
-Readable.prototype.pause = function () {
- debug('call pause flowing=%j', this._readableState.flowing);
- if (false !== this._readableState.flowing) {
- debug('pause');
- this._readableState.flowing = false;
- this.emit('pause');
- }
- return this;
-};
-
-function flow(stream) {
- var state = stream._readableState;
- debug('flow', state.flowing);
- while (state.flowing && stream.read() !== null) {}
-}
-
-// wrap an old-style stream as the async data source.
-// This is *not* part of the readable stream interface.
-// It is an ugly unfortunate mess of history.
-Readable.prototype.wrap = function (stream) {
- var _this = this;
-
- var state = this._readableState;
- var paused = false;
-
- stream.on('end', function () {
- debug('wrapped end');
- if (state.decoder && !state.ended) {
- var chunk = state.decoder.end();
- if (chunk && chunk.length) _this.push(chunk);
- }
-
- _this.push(null);
- });
-
- stream.on('data', function (chunk) {
- debug('wrapped data');
- if (state.decoder) chunk = state.decoder.write(chunk);
-
- // don't skip over falsy values in objectMode
- if (state.objectMode && (chunk === null || chunk === undefined)) return;else if (!state.objectMode && (!chunk || !chunk.length)) return;
-
- var ret = _this.push(chunk);
- if (!ret) {
- paused = true;
- stream.pause();
- }
- });
-
- // proxy all the other methods.
- // important when wrapping filters and duplexes.
- for (var i in stream) {
- if (this[i] === undefined && typeof stream[i] === 'function') {
- this[i] = function (method) {
- return function () {
- return stream[method].apply(stream, arguments);
- };
- }(i);
- }
- }
-
- // proxy certain important events.
- for (var n = 0; n < kProxyEvents.length; n++) {
- stream.on(kProxyEvents[n], this.emit.bind(this, kProxyEvents[n]));
- }
-
- // when we try to consume some more bytes, simply unpause the
- // underlying stream.
- this._read = function (n) {
- debug('wrapped _read', n);
- if (paused) {
- paused = false;
- stream.resume();
- }
- };
-
- return this;
-};
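// Illustrative sketch: adapting an old-style (pre-streams2) source that only
// emits 'data'/'end' with wrap(). `OldReader` and './old-api-module.js' are
// hypothetical placeholders for such a legacy stream.
var Readable = require('readable-stream').Readable;
var OldReader = require('./old-api-module.js').OldReader; // hypothetical

var oldStream = new OldReader();
var wrapped = new Readable({ highWaterMark: 16 * 1024 }).wrap(oldStream);

wrapped.on('readable', function () {
  var chunk;
  while ((chunk = wrapped.read()) !== null) {
    process.stdout.write(chunk);
  }
});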
-
-Object.defineProperty(Readable.prototype, 'readableHighWaterMark', {
- // making it explicit this property is not enumerable
- // because otherwise some prototype manipulation in
- // userland will fail
- enumerable: false,
- get: function () {
- return this._readableState.highWaterMark;
- }
-});
-
-// exposed for testing purposes only.
-Readable._fromList = fromList;
-
-// Pluck off n bytes from an array of buffers.
-// Length is the combined lengths of all the buffers in the list.
-// This function is designed to be inlinable, so please take care when making
-// changes to the function body.
-function fromList(n, state) {
- // nothing buffered
- if (state.length === 0) return null;
-
- var ret;
- if (state.objectMode) ret = state.buffer.shift();else if (!n || n >= state.length) {
- // read it all, truncate the list
- if (state.decoder) ret = state.buffer.join('');else if (state.buffer.length === 1) ret = state.buffer.head.data;else ret = state.buffer.concat(state.length);
- state.buffer.clear();
- } else {
- // read part of list
- ret = fromListPartial(n, state.buffer, state.decoder);
- }
-
- return ret;
-}
-
-// Extracts only enough buffered data to satisfy the amount requested.
-// This function is designed to be inlinable, so please take care when making
-// changes to the function body.
-function fromListPartial(n, list, hasStrings) {
- var ret;
- if (n < list.head.data.length) {
- // slice is the same for buffers and strings
- ret = list.head.data.slice(0, n);
- list.head.data = list.head.data.slice(n);
- } else if (n === list.head.data.length) {
- // first chunk is a perfect match
- ret = list.shift();
- } else {
- // result spans more than one buffer
- ret = hasStrings ? copyFromBufferString(n, list) : copyFromBuffer(n, list);
- }
- return ret;
-}
-
-// Copies a specified amount of characters from the list of buffered data
-// chunks.
-// This function is designed to be inlinable, so please take care when making
-// changes to the function body.
-function copyFromBufferString(n, list) {
- var p = list.head;
- var c = 1;
- var ret = p.data;
- n -= ret.length;
- while (p = p.next) {
- var str = p.data;
- var nb = n > str.length ? str.length : n;
- if (nb === str.length) ret += str;else ret += str.slice(0, n);
- n -= nb;
- if (n === 0) {
- if (nb === str.length) {
- ++c;
- if (p.next) list.head = p.next;else list.head = list.tail = null;
- } else {
- list.head = p;
- p.data = str.slice(nb);
- }
- break;
- }
- ++c;
- }
- list.length -= c;
- return ret;
-}
-
-// Copies a specified amount of bytes from the list of buffered data chunks.
-// This function is designed to be inlinable, so please take care when making
-// changes to the function body.
-function copyFromBuffer(n, list) {
- var ret = Buffer.allocUnsafe(n);
- var p = list.head;
- var c = 1;
- p.data.copy(ret);
- n -= p.data.length;
- while (p = p.next) {
- var buf = p.data;
- var nb = n > buf.length ? buf.length : n;
- buf.copy(ret, ret.length - n, 0, nb);
- n -= nb;
- if (n === 0) {
- if (nb === buf.length) {
- ++c;
- if (p.next) list.head = p.next;else list.head = list.tail = null;
- } else {
- list.head = p;
- p.data = buf.slice(nb);
- }
- break;
- }
- ++c;
- }
- list.length -= c;
- return ret;
-}
-
-function endReadable(stream) {
- var state = stream._readableState;
-
- // If we get here before consuming all the bytes, then that is a
- // bug in node. Should never happen.
- if (state.length > 0) throw new Error('"endReadable()" called on non-empty stream');
-
- if (!state.endEmitted) {
- state.ended = true;
- pna.nextTick(endReadableNT, state, stream);
- }
-}
-
-function endReadableNT(state, stream) {
- // Check that we didn't get one last unshift.
- if (!state.endEmitted && state.length === 0) {
- state.endEmitted = true;
- stream.readable = false;
- stream.emit('end');
- }
-}
-
-function indexOf(xs, x) {
- for (var i = 0, l = xs.length; i < l; i++) {
- if (xs[i] === x) return i;
- }
- return -1;
-}
\ No newline at end of file
diff --git a/node_modules/through2/node_modules/readable-stream/lib/_stream_transform.js b/node_modules/through2/node_modules/readable-stream/lib/_stream_transform.js
deleted file mode 100644
index fcfc105af8e9a124bea4b82011f6cb7d6d2a7158..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/_stream_transform.js
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright Joyent, Inc. and other Node contributors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-// USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// a transform stream is a readable/writable stream where you do
-// something with the data. Sometimes it's called a "filter",
-// but that's not a great name for it, since that implies a thing where
-// some bits pass through, and others are simply ignored. (That would
-// be a valid example of a transform, of course.)
-//
-// While the output is causally related to the input, it's not a
-// necessarily symmetric or synchronous transformation. For example,
-// a zlib stream might take multiple plain-text writes(), and then
-// emit a single compressed chunk some time in the future.
-//
-// Here's how this works:
-//
-// The Transform stream has all the aspects of the readable and writable
-// stream classes. When you write(chunk), that calls _write(chunk,cb)
-// internally, and returns false if there's a lot of pending writes
-// buffered up. When you call read(), that calls _read(n) until
-// there's enough pending readable data buffered up.
-//
-// In a transform stream, the written data is placed in a buffer. When
-// _read(n) is called, it transforms the queued up data, calling the
-// buffered _write cb's as it consumes chunks. If consuming a single
-// written chunk would result in multiple output chunks, then the first
-// outputted bit calls the readcb, and subsequent chunks just go into
-// the read buffer, and will cause it to emit 'readable' if necessary.
-//
-// This way, back-pressure is actually determined by the reading side,
-// since _read has to be called to start processing a new chunk. However,
-// a pathological inflate type of transform can cause excessive buffering
-// here. For example, imagine a stream where every byte of input is
-// interpreted as an integer from 0-255, and then results in that many
-// bytes of output. Writing the 4 bytes {ff,ff,ff,ff} would result in
-// 1kb of data being output. In this case, you could write a very small
-// amount of input, and end up with a very large amount of output. In
-// such a pathological inflating mechanism, there'd be no way to tell
-// the system to stop doing the transform. A single 4MB write could
-// cause the system to run out of memory.
-//
-// However, even in such a pathological case, only a single written chunk
-// would be consumed, and then the rest would wait (un-transformed) until
-// the results of the previous transformed chunk were consumed.
-
-'use strict';
-
-module.exports = Transform;
-
-var Duplex = require('./_stream_duplex');
-
-/**/
-var util = Object.create(require('core-util-is'));
-util.inherits = require('inherits');
-/**/
-
-util.inherits(Transform, Duplex);
-
-function afterTransform(er, data) {
- var ts = this._transformState;
- ts.transforming = false;
-
- var cb = ts.writecb;
-
- if (!cb) {
- return this.emit('error', new Error('write callback called multiple times'));
- }
-
- ts.writechunk = null;
- ts.writecb = null;
-
- if (data != null) // single equals check for both `null` and `undefined`
- this.push(data);
-
- cb(er);
-
- var rs = this._readableState;
- rs.reading = false;
- if (rs.needReadable || rs.length < rs.highWaterMark) {
- this._read(rs.highWaterMark);
- }
-}
-
-function Transform(options) {
- if (!(this instanceof Transform)) return new Transform(options);
-
- Duplex.call(this, options);
-
- this._transformState = {
- afterTransform: afterTransform.bind(this),
- needTransform: false,
- transforming: false,
- writecb: null,
- writechunk: null,
- writeencoding: null
- };
-
- // start out asking for a readable event once data is transformed.
- this._readableState.needReadable = true;
-
- // we have implemented the _read method, and done the other things
- // that Readable wants before the first _read call, so unset the
- // sync guard flag.
- this._readableState.sync = false;
-
- if (options) {
- if (typeof options.transform === 'function') this._transform = options.transform;
-
- if (typeof options.flush === 'function') this._flush = options.flush;
- }
-
- // When the writable side finishes, then flush out anything remaining.
- this.on('prefinish', prefinish);
-}
-
-function prefinish() {
- var _this = this;
-
- if (typeof this._flush === 'function') {
- this._flush(function (er, data) {
- done(_this, er, data);
- });
- } else {
- done(this, null, null);
- }
-}
-
-Transform.prototype.push = function (chunk, encoding) {
- this._transformState.needTransform = false;
- return Duplex.prototype.push.call(this, chunk, encoding);
-};
-
-// This is the part where you do stuff!
-// override this function in implementation classes.
-// 'chunk' is an input chunk.
-//
-// Call `push(newChunk)` to pass along transformed output
-// to the readable side. You may call 'push' zero or more times.
-//
-// Call `cb(err)` when you are done with this chunk. If you pass
-// an error, then that'll put the hurt on the whole operation. If you
-// never call cb(), then you'll never get another chunk.
-Transform.prototype._transform = function (chunk, encoding, cb) {
- throw new Error('_transform() is not implemented');
-};
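// Illustrative sketch: overriding _transform()/_flush() as described above —
// push zero or more output chunks, then call cb() exactly once per written
// chunk. The uppercasing transform is a hypothetical example.
var Transform = require('readable-stream').Transform;

var upper = new Transform({
  transform: function (chunk, encoding, cb) {
    this.push(chunk.toString().toUpperCase()); // zero or more push() calls
    cb();                                      // exactly one cb() per chunk
  },
  flush: function (cb) {
    this.push('\n-- end of input --\n');       // emit any buffered tail
    cb();
  }
});

process.stdin.pipe(upper).pipe(process.stdout);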
-
-Transform.prototype._write = function (chunk, encoding, cb) {
- var ts = this._transformState;
- ts.writecb = cb;
- ts.writechunk = chunk;
- ts.writeencoding = encoding;
- if (!ts.transforming) {
- var rs = this._readableState;
- if (ts.needTransform || rs.needReadable || rs.length < rs.highWaterMark) this._read(rs.highWaterMark);
- }
-};
-
-// Doesn't matter what the args are here.
-// _transform does all the work.
-// That we got here means that the readable side wants more data.
-Transform.prototype._read = function (n) {
- var ts = this._transformState;
-
- if (ts.writechunk !== null && ts.writecb && !ts.transforming) {
- ts.transforming = true;
- this._transform(ts.writechunk, ts.writeencoding, ts.afterTransform);
- } else {
- // mark that we need a transform, so that any data that comes in
- // will get processed, now that we've asked for it.
- ts.needTransform = true;
- }
-};
-
-Transform.prototype._destroy = function (err, cb) {
- var _this2 = this;
-
- Duplex.prototype._destroy.call(this, err, function (err2) {
- cb(err2);
- _this2.emit('close');
- });
-};
-
-function done(stream, er, data) {
- if (er) return stream.emit('error', er);
-
- if (data != null) // single equals check for both `null` and `undefined`
- stream.push(data);
-
- // if there's nothing in the write buffer, then that means
- // that nothing more will ever be provided
- if (stream._writableState.length) throw new Error('Calling transform done when ws.length != 0');
-
- if (stream._transformState.transforming) throw new Error('Calling transform done when still transforming');
-
- return stream.push(null);
-}
\ No newline at end of file
diff --git a/node_modules/through2/node_modules/readable-stream/lib/_stream_writable.js b/node_modules/through2/node_modules/readable-stream/lib/_stream_writable.js
deleted file mode 100644
index b0b02200cd72336c17fe871bb7f3ec3872dd802d..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/_stream_writable.js
+++ /dev/null
@@ -1,687 +0,0 @@
-// Copyright Joyent, Inc. and other Node contributors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-// USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// A bit simpler than readable streams.
-// Implement an async ._write(chunk, encoding, cb), and it'll handle all
-// the drain event emission and buffering.
-
-'use strict';
-
-/**/
-
-var pna = require('process-nextick-args');
-/**/
-
-module.exports = Writable;
-
-/* */
-function WriteReq(chunk, encoding, cb) {
- this.chunk = chunk;
- this.encoding = encoding;
- this.callback = cb;
- this.next = null;
-}
-
-// It looks like a linked list, but it is not:
-// there will only ever be two of these per stream
-function CorkedRequest(state) {
- var _this = this;
-
- this.next = null;
- this.entry = null;
- this.finish = function () {
- onCorkedFinish(_this, state);
- };
-}
-/* */
-
-/**/
-var asyncWrite = !process.browser && ['v0.10', 'v0.9.'].indexOf(process.version.slice(0, 5)) > -1 ? setImmediate : pna.nextTick;
-/**/
-
-/**/
-var Duplex;
-/**/
-
-Writable.WritableState = WritableState;
-
-/**/
-var util = Object.create(require('core-util-is'));
-util.inherits = require('inherits');
-/**/
-
-/**/
-var internalUtil = {
- deprecate: require('util-deprecate')
-};
-/**/
-
-/**/
-var Stream = require('./internal/streams/stream');
-/**/
-
-/**/
-
-var Buffer = require('safe-buffer').Buffer;
-var OurUint8Array = global.Uint8Array || function () {};
-function _uint8ArrayToBuffer(chunk) {
- return Buffer.from(chunk);
-}
-function _isUint8Array(obj) {
- return Buffer.isBuffer(obj) || obj instanceof OurUint8Array;
-}
-
-/**/
-
-var destroyImpl = require('./internal/streams/destroy');
-
-util.inherits(Writable, Stream);
-
-function nop() {}
-
-function WritableState(options, stream) {
- Duplex = Duplex || require('./_stream_duplex');
-
- options = options || {};
-
- // Duplex streams are both readable and writable, but share
- // the same options object.
- // However, some cases require setting options to different
- // values for the readable and the writable sides of the duplex stream.
- // These options can be provided separately as readableXXX and writableXXX.
- var isDuplex = stream instanceof Duplex;
-
- // object stream flag to indicate whether or not this stream
- // contains buffers or objects.
- this.objectMode = !!options.objectMode;
-
- if (isDuplex) this.objectMode = this.objectMode || !!options.writableObjectMode;
-
- // the point at which write() starts returning false
- // Note: 0 is a valid value, means that we always return false if
- // the entire buffer is not flushed immediately on write()
- var hwm = options.highWaterMark;
- var writableHwm = options.writableHighWaterMark;
- var defaultHwm = this.objectMode ? 16 : 16 * 1024;
-
- if (hwm || hwm === 0) this.highWaterMark = hwm;else if (isDuplex && (writableHwm || writableHwm === 0)) this.highWaterMark = writableHwm;else this.highWaterMark = defaultHwm;
-
- // cast to ints.
- this.highWaterMark = Math.floor(this.highWaterMark);
-
- // if _final has been called
- this.finalCalled = false;
-
- // drain event flag.
- this.needDrain = false;
- // at the start of calling end()
- this.ending = false;
- // when end() has been called, and returned
- this.ended = false;
- // when 'finish' is emitted
- this.finished = false;
-
- // has it been destroyed
- this.destroyed = false;
-
- // should we decode strings into buffers before passing to _write?
- // this is here so that some node-core streams can optimize string
- // handling at a lower level.
- var noDecode = options.decodeStrings === false;
- this.decodeStrings = !noDecode;
-
- // Crypto is kind of old and crusty. Historically, its default string
- // encoding is 'binary' so we have to make this configurable.
- // Everything else in the universe uses 'utf8', though.
- this.defaultEncoding = options.defaultEncoding || 'utf8';
-
- // not an actual buffer we keep track of, but a measurement
- // of how much we're waiting to get pushed to some underlying
- // socket or file.
- this.length = 0;
-
- // a flag to see when we're in the middle of a write.
- this.writing = false;
-
- // when true all writes will be buffered until .uncork() call
- this.corked = 0;
-
- // a flag to be able to tell if the onwrite cb is called immediately,
- // or on a later tick. We set this to true at first, because any
- // actions that shouldn't happen until "later" should generally also
- // not happen before the first write call.
- this.sync = true;
-
- // a flag to know if we're processing previously buffered items, which
- // may call the _write() callback in the same tick, so that we don't
- // end up in an overlapped onwrite situation.
- this.bufferProcessing = false;
-
- // the callback that's passed to _write(chunk,cb)
- this.onwrite = function (er) {
- onwrite(stream, er);
- };
-
- // the callback that the user supplies to write(chunk,encoding,cb)
- this.writecb = null;
-
- // the amount that is being written when _write is called.
- this.writelen = 0;
-
- this.bufferedRequest = null;
- this.lastBufferedRequest = null;
-
- // number of pending user-supplied write callbacks
- // this must be 0 before 'finish' can be emitted
- this.pendingcb = 0;
-
- // emit prefinish if the only thing we're waiting for is _write cbs
- // This is relevant for synchronous Transform streams
- this.prefinished = false;
-
- // True if the error was already emitted and should not be thrown again
- this.errorEmitted = false;
-
- // count buffered requests
- this.bufferedRequestCount = 0;
-
- // allocate the first CorkedRequest, there is always
- // one allocated and free to use, and we maintain at most two
- this.corkedRequestsFree = new CorkedRequest(this);
-}
-
-WritableState.prototype.getBuffer = function getBuffer() {
- var current = this.bufferedRequest;
- var out = [];
- while (current) {
- out.push(current);
- current = current.next;
- }
- return out;
-};
-
-(function () {
- try {
- Object.defineProperty(WritableState.prototype, 'buffer', {
- get: internalUtil.deprecate(function () {
- return this.getBuffer();
- }, '_writableState.buffer is deprecated. Use _writableState.getBuffer ' + 'instead.', 'DEP0003')
- });
- } catch (_) {}
-})();
-
-// Test _writableState for inheritance to account for Duplex streams,
-// whose prototype chain only points to Readable.
-var realHasInstance;
-if (typeof Symbol === 'function' && Symbol.hasInstance && typeof Function.prototype[Symbol.hasInstance] === 'function') {
- realHasInstance = Function.prototype[Symbol.hasInstance];
- Object.defineProperty(Writable, Symbol.hasInstance, {
- value: function (object) {
- if (realHasInstance.call(this, object)) return true;
- if (this !== Writable) return false;
-
- return object && object._writableState instanceof WritableState;
- }
- });
-} else {
- realHasInstance = function (object) {
- return object instanceof this;
- };
-}
-
-function Writable(options) {
- Duplex = Duplex || require('./_stream_duplex');
-
- // Writable ctor is applied to Duplexes, too.
- // `realHasInstance` is necessary because using plain `instanceof`
- // would return false, as no `_writableState` property is attached.
-
- // Trying to use the custom `instanceof` for Writable here will also break the
- // Node.js LazyTransform implementation, which has a non-trivial getter for
- // `_writableState` that would lead to infinite recursion.
- if (!realHasInstance.call(Writable, this) && !(this instanceof Duplex)) {
- return new Writable(options);
- }
-
- this._writableState = new WritableState(options, this);
-
- // legacy.
- this.writable = true;
-
- if (options) {
- if (typeof options.write === 'function') this._write = options.write;
-
- if (typeof options.writev === 'function') this._writev = options.writev;
-
- if (typeof options.destroy === 'function') this._destroy = options.destroy;
-
- if (typeof options.final === 'function') this._final = options.final;
- }
-
- Stream.call(this);
-}
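// Illustrative sketch: constructing a Writable through the options wired up
// by the constructor above (write/writev/destroy/final). The file-appending
// sink and the 'out.log' path are hypothetical examples.
var Writable = require('readable-stream').Writable;
var fs = require('fs');

var sink = new Writable({
  write: function (chunk, encoding, cb) {
    fs.appendFile('out.log', chunk, cb);       // async _write: cb() when done
  },
  final: function (cb) {
    fs.appendFile('out.log', '-- end --\n', cb);
  }
});

sink.write('first line\n');
sink.end('last line\n');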
-
-// Otherwise people can pipe Writable streams, which is just wrong.
-Writable.prototype.pipe = function () {
- this.emit('error', new Error('Cannot pipe, not readable'));
-};
-
-function writeAfterEnd(stream, cb) {
- var er = new Error('write after end');
- // TODO: defer error events consistently everywhere, not just the cb
- stream.emit('error', er);
- pna.nextTick(cb, er);
-}
-
-// Checks that a user-supplied chunk is valid, especially for the particular
-// mode the stream is in. Currently this means that `null` is never accepted
-// and undefined/non-string values are only allowed in object mode.
-function validChunk(stream, state, chunk, cb) {
- var valid = true;
- var er = false;
-
- if (chunk === null) {
- er = new TypeError('May not write null values to stream');
- } else if (typeof chunk !== 'string' && chunk !== undefined && !state.objectMode) {
- er = new TypeError('Invalid non-string/buffer chunk');
- }
- if (er) {
- stream.emit('error', er);
- pna.nextTick(cb, er);
- valid = false;
- }
- return valid;
-}
-
-Writable.prototype.write = function (chunk, encoding, cb) {
- var state = this._writableState;
- var ret = false;
- var isBuf = !state.objectMode && _isUint8Array(chunk);
-
- if (isBuf && !Buffer.isBuffer(chunk)) {
- chunk = _uint8ArrayToBuffer(chunk);
- }
-
- if (typeof encoding === 'function') {
- cb = encoding;
- encoding = null;
- }
-
- if (isBuf) encoding = 'buffer';else if (!encoding) encoding = state.defaultEncoding;
-
- if (typeof cb !== 'function') cb = nop;
-
- if (state.ended) writeAfterEnd(this, cb);else if (isBuf || validChunk(this, state, chunk, cb)) {
- state.pendingcb++;
- ret = writeOrBuffer(this, state, isBuf, chunk, encoding, cb);
- }
-
- return ret;
-};
-
-Writable.prototype.cork = function () {
- var state = this._writableState;
-
- state.corked++;
-};
-
-Writable.prototype.uncork = function () {
- var state = this._writableState;
-
- if (state.corked) {
- state.corked--;
-
- if (!state.writing && !state.corked && !state.finished && !state.bufferProcessing && state.bufferedRequest) clearBuffer(this, state);
- }
-};
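// Illustrative sketch: cork()/uncork() batching — writes issued while corked
// are buffered and flushed together (through _writev when one is provided)
// once every cork() call has been matched by uncork(). The stdout sink is a
// hypothetical example.
var Writable = require('readable-stream').Writable;

var sink = new Writable({
  writev: function (chunks, cb) {              // receives the whole corked batch
    chunks.forEach(function (c) { process.stdout.write(c.chunk); });
    cb();
  },
  write: function (chunk, encoding, cb) {      // fallback for single writes
    process.stdout.write(chunk);
    cb();
  }
});

sink.cork();
sink.write('header\n');
sink.write('body\n');
process.nextTick(function () { sink.uncork(); }); // flushes both in one _writev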
-
-Writable.prototype.setDefaultEncoding = function setDefaultEncoding(encoding) {
- // node::ParseEncoding() requires lower case.
- if (typeof encoding === 'string') encoding = encoding.toLowerCase();
- if (!(['hex', 'utf8', 'utf-8', 'ascii', 'binary', 'base64', 'ucs2', 'ucs-2', 'utf16le', 'utf-16le', 'raw'].indexOf((encoding + '').toLowerCase()) > -1)) throw new TypeError('Unknown encoding: ' + encoding);
- this._writableState.defaultEncoding = encoding;
- return this;
-};
-
-function decodeChunk(state, chunk, encoding) {
- if (!state.objectMode && state.decodeStrings !== false && typeof chunk === 'string') {
- chunk = Buffer.from(chunk, encoding);
- }
- return chunk;
-}
-
-Object.defineProperty(Writable.prototype, 'writableHighWaterMark', {
- // making it explicit this property is not enumerable
- // because otherwise some prototype manipulation in
- // userland will fail
- enumerable: false,
- get: function () {
- return this._writableState.highWaterMark;
- }
-});
-
-// if we're already writing something, then just put this
-// in the queue, and wait our turn. Otherwise, call _write
-// If we return false, then we need a drain event, so set that flag.
-function writeOrBuffer(stream, state, isBuf, chunk, encoding, cb) {
- if (!isBuf) {
- var newChunk = decodeChunk(state, chunk, encoding);
- if (chunk !== newChunk) {
- isBuf = true;
- encoding = 'buffer';
- chunk = newChunk;
- }
- }
- var len = state.objectMode ? 1 : chunk.length;
-
- state.length += len;
-
- var ret = state.length < state.highWaterMark;
- // we must ensure that previous needDrain will not be reset to false.
- if (!ret) state.needDrain = true;
-
- if (state.writing || state.corked) {
- var last = state.lastBufferedRequest;
- state.lastBufferedRequest = {
- chunk: chunk,
- encoding: encoding,
- isBuf: isBuf,
- callback: cb,
- next: null
- };
- if (last) {
- last.next = state.lastBufferedRequest;
- } else {
- state.bufferedRequest = state.lastBufferedRequest;
- }
- state.bufferedRequestCount += 1;
- } else {
- doWrite(stream, state, false, len, chunk, encoding, cb);
- }
-
- return ret;
-}
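// Illustrative sketch: honouring the return value computed above
// (state.length < highWaterMark). When write() returns false, stop writing
// and wait for 'drain' before continuing. writeManyLines() is a hypothetical
// producer, not part of this module.
function writeManyLines(writable, cb) {
  var i = 1000000;
  (function writeSome() {
    var ok = true;
    while (i > 0 && ok) {
      i--;
      if (i === 0) {
        writable.write('line ' + i + '\n', cb);  // last chunk: pass the callback
      } else {
        ok = writable.write('line ' + i + '\n'); // false => internal buffer full
      }
    }
    if (i > 0) writable.once('drain', writeSome); // resume after 'drain'
  })();
}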
-
-function doWrite(stream, state, writev, len, chunk, encoding, cb) {
- state.writelen = len;
- state.writecb = cb;
- state.writing = true;
- state.sync = true;
- if (writev) stream._writev(chunk, state.onwrite);else stream._write(chunk, encoding, state.onwrite);
- state.sync = false;
-}
-
-function onwriteError(stream, state, sync, er, cb) {
- --state.pendingcb;
-
- if (sync) {
- // defer the callback if we are being called synchronously
- // to avoid piling up things on the stack
- pna.nextTick(cb, er);
- // this can emit finish, and it will always happen
- // after error
- pna.nextTick(finishMaybe, stream, state);
- stream._writableState.errorEmitted = true;
- stream.emit('error', er);
- } else {
- // the caller expects the callback to be invoked before the
- // 'error' event when the write is asynchronous
- cb(er);
- stream._writableState.errorEmitted = true;
- stream.emit('error', er);
- // this can emit finish, but finish must
- // always follow error
- finishMaybe(stream, state);
- }
-}
-
-function onwriteStateUpdate(state) {
- state.writing = false;
- state.writecb = null;
- state.length -= state.writelen;
- state.writelen = 0;
-}
-
-function onwrite(stream, er) {
- var state = stream._writableState;
- var sync = state.sync;
- var cb = state.writecb;
-
- onwriteStateUpdate(state);
-
- if (er) onwriteError(stream, state, sync, er, cb);else {
- // Check if we're actually ready to finish, but don't emit yet
- var finished = needFinish(state);
-
- if (!finished && !state.corked && !state.bufferProcessing && state.bufferedRequest) {
- clearBuffer(stream, state);
- }
-
- if (sync) {
- /**/
- asyncWrite(afterWrite, stream, state, finished, cb);
- /**/
- } else {
- afterWrite(stream, state, finished, cb);
- }
- }
-}
-
-function afterWrite(stream, state, finished, cb) {
- if (!finished) onwriteDrain(stream, state);
- state.pendingcb--;
- cb();
- finishMaybe(stream, state);
-}
-
-// Must force callback to be called on nextTick, so that we don't
-// emit 'drain' before the write() consumer gets the 'false' return
-// value, and has a chance to attach a 'drain' listener.
-function onwriteDrain(stream, state) {
- if (state.length === 0 && state.needDrain) {
- state.needDrain = false;
- stream.emit('drain');
- }
-}
-
-// if there's something in the buffer waiting, then process it
-function clearBuffer(stream, state) {
- state.bufferProcessing = true;
- var entry = state.bufferedRequest;
-
- if (stream._writev && entry && entry.next) {
- // Fast case, write everything using _writev()
- var l = state.bufferedRequestCount;
- var buffer = new Array(l);
- var holder = state.corkedRequestsFree;
- holder.entry = entry;
-
- var count = 0;
- var allBuffers = true;
- while (entry) {
- buffer[count] = entry;
- if (!entry.isBuf) allBuffers = false;
- entry = entry.next;
- count += 1;
- }
- buffer.allBuffers = allBuffers;
-
- doWrite(stream, state, true, state.length, buffer, '', holder.finish);
-
- // doWrite is almost always async, defer these to save a bit of time
- // as the hot path ends with doWrite
- state.pendingcb++;
- state.lastBufferedRequest = null;
- if (holder.next) {
- state.corkedRequestsFree = holder.next;
- holder.next = null;
- } else {
- state.corkedRequestsFree = new CorkedRequest(state);
- }
- state.bufferedRequestCount = 0;
- } else {
- // Slow case, write chunks one-by-one
- while (entry) {
- var chunk = entry.chunk;
- var encoding = entry.encoding;
- var cb = entry.callback;
- var len = state.objectMode ? 1 : chunk.length;
-
- doWrite(stream, state, false, len, chunk, encoding, cb);
- entry = entry.next;
- state.bufferedRequestCount--;
- // if we didn't call the onwrite immediately, then
- // it means that we need to wait until it does.
- // also, that means that the chunk and cb are currently
- // being processed, so move the buffer counter past them.
- if (state.writing) {
- break;
- }
- }
-
- if (entry === null) state.lastBufferedRequest = null;
- }
-
- state.bufferedRequest = entry;
- state.bufferProcessing = false;
-}
-
-Writable.prototype._write = function (chunk, encoding, cb) {
- cb(new Error('_write() is not implemented'));
-};
-
-Writable.prototype._writev = null;
-
-Writable.prototype.end = function (chunk, encoding, cb) {
- var state = this._writableState;
-
- if (typeof chunk === 'function') {
- cb = chunk;
- chunk = null;
- encoding = null;
- } else if (typeof encoding === 'function') {
- cb = encoding;
- encoding = null;
- }
-
- if (chunk !== null && chunk !== undefined) this.write(chunk, encoding);
-
- // .end() fully uncorks
- if (state.corked) {
- state.corked = 1;
- this.uncork();
- }
-
- // ignore unnecessary end() calls.
- if (!state.ending && !state.finished) endWritable(this, state, cb);
-};
-
-function needFinish(state) {
- return state.ending && state.length === 0 && state.bufferedRequest === null && !state.finished && !state.writing;
-}
-function callFinal(stream, state) {
- stream._final(function (err) {
- state.pendingcb--;
- if (err) {
- stream.emit('error', err);
- }
- state.prefinished = true;
- stream.emit('prefinish');
- finishMaybe(stream, state);
- });
-}
-function prefinish(stream, state) {
- if (!state.prefinished && !state.finalCalled) {
- if (typeof stream._final === 'function') {
- state.pendingcb++;
- state.finalCalled = true;
- pna.nextTick(callFinal, stream, state);
- } else {
- state.prefinished = true;
- stream.emit('prefinish');
- }
- }
-}
-
-function finishMaybe(stream, state) {
- var need = needFinish(state);
- if (need) {
- prefinish(stream, state);
- if (state.pendingcb === 0) {
- state.finished = true;
- stream.emit('finish');
- }
- }
- return need;
-}
-
-function endWritable(stream, state, cb) {
- state.ending = true;
- finishMaybe(stream, state);
- if (cb) {
- if (state.finished) pna.nextTick(cb);else stream.once('finish', cb);
- }
- state.ended = true;
- stream.writable = false;
-}
-
-function onCorkedFinish(corkReq, state, err) {
- var entry = corkReq.entry;
- corkReq.entry = null;
- while (entry) {
- var cb = entry.callback;
- state.pendingcb--;
- cb(err);
- entry = entry.next;
- }
- if (state.corkedRequestsFree) {
- state.corkedRequestsFree.next = corkReq;
- } else {
- state.corkedRequestsFree = corkReq;
- }
-}
-
-Object.defineProperty(Writable.prototype, 'destroyed', {
- get: function () {
- if (this._writableState === undefined) {
- return false;
- }
- return this._writableState.destroyed;
- },
- set: function (value) {
- // we ignore the value if the stream
- // has not been initialized yet
- if (!this._writableState) {
- return;
- }
-
- // backward compatibility, the user is explicitly
- // managing destroyed
- this._writableState.destroyed = value;
- }
-});
-
-Writable.prototype.destroy = destroyImpl.destroy;
-Writable.prototype._undestroy = destroyImpl.undestroy;
-Writable.prototype._destroy = function (err, cb) {
- this.end();
- cb(err);
-};
\ No newline at end of file
diff --git a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/BufferList.js b/node_modules/through2/node_modules/readable-stream/lib/internal/streams/BufferList.js
deleted file mode 100644
index aefc68bd90b9c2bd7da278323bcd42a7aad8b853..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/BufferList.js
+++ /dev/null
@@ -1,79 +0,0 @@
-'use strict';
-
-function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
-
-var Buffer = require('safe-buffer').Buffer;
-var util = require('util');
-
-function copyBuffer(src, target, offset) {
- src.copy(target, offset);
-}
-
-module.exports = function () {
- function BufferList() {
- _classCallCheck(this, BufferList);
-
- this.head = null;
- this.tail = null;
- this.length = 0;
- }
-
- BufferList.prototype.push = function push(v) {
- var entry = { data: v, next: null };
- if (this.length > 0) this.tail.next = entry;else this.head = entry;
- this.tail = entry;
- ++this.length;
- };
-
- BufferList.prototype.unshift = function unshift(v) {
- var entry = { data: v, next: this.head };
- if (this.length === 0) this.tail = entry;
- this.head = entry;
- ++this.length;
- };
-
- BufferList.prototype.shift = function shift() {
- if (this.length === 0) return;
- var ret = this.head.data;
- if (this.length === 1) this.head = this.tail = null;else this.head = this.head.next;
- --this.length;
- return ret;
- };
-
- BufferList.prototype.clear = function clear() {
- this.head = this.tail = null;
- this.length = 0;
- };
-
- BufferList.prototype.join = function join(s) {
- if (this.length === 0) return '';
- var p = this.head;
- var ret = '' + p.data;
- while (p = p.next) {
- ret += s + p.data;
- }return ret;
- };
-
- BufferList.prototype.concat = function concat(n) {
- if (this.length === 0) return Buffer.alloc(0);
- if (this.length === 1) return this.head.data;
- var ret = Buffer.allocUnsafe(n >>> 0);
- var p = this.head;
- var i = 0;
- while (p) {
- copyBuffer(p.data, ret, i);
- i += p.data.length;
- p = p.next;
- }
- return ret;
- };
-
- return BufferList;
-}();
-
-if (util && util.inspect && util.inspect.custom) {
- module.exports.prototype[util.inspect.custom] = function () {
- var obj = util.inspect({ length: this.length });
- return this.constructor.name + ' ' + obj;
- };
-}
\ No newline at end of file
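
(Editorial note; not part of the patch.) The `BufferList` removed above is the singly linked list that readable streams use to queue incoming chunks; `concat(n)` copies every queued chunk into a single Buffer of `n` bytes. A small usage sketch, assuming `readable-stream@2` is installed (the require path simply mirrors the deleted file's location):

```js
var Buffer = require('safe-buffer').Buffer;
var BufferList = require('readable-stream/lib/internal/streams/BufferList');

var list = new BufferList();
list.push(Buffer.from('hello '));
list.push(Buffer.from('world'));

console.log(list.length);                 // 2 (number of queued chunks, not bytes)
console.log(list.concat(11).toString());  // 'hello world' (11 = total byte length)
console.log(list.join('|'));              // 'hello |world'
```
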
diff --git a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/destroy.js b/node_modules/through2/node_modules/readable-stream/lib/internal/streams/destroy.js
deleted file mode 100644
index 5a0a0d88cec6f30f054f14ba6253d4359d86c434..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/destroy.js
+++ /dev/null
@@ -1,74 +0,0 @@
-'use strict';
-
-/**/
-
-var pna = require('process-nextick-args');
-/**/
-
-// undocumented cb() API, needed for core, not for public API
-function destroy(err, cb) {
- var _this = this;
-
- var readableDestroyed = this._readableState && this._readableState.destroyed;
- var writableDestroyed = this._writableState && this._writableState.destroyed;
-
- if (readableDestroyed || writableDestroyed) {
- if (cb) {
- cb(err);
- } else if (err && (!this._writableState || !this._writableState.errorEmitted)) {
- pna.nextTick(emitErrorNT, this, err);
- }
- return this;
- }
-
- // we set destroyed to true before firing error callbacks in order
- // to make it re-entrance safe in case destroy() is called within callbacks
-
- if (this._readableState) {
- this._readableState.destroyed = true;
- }
-
- // if this is a duplex stream mark the writable part as destroyed as well
- if (this._writableState) {
- this._writableState.destroyed = true;
- }
-
- this._destroy(err || null, function (err) {
- if (!cb && err) {
- pna.nextTick(emitErrorNT, _this, err);
- if (_this._writableState) {
- _this._writableState.errorEmitted = true;
- }
- } else if (cb) {
- cb(err);
- }
- });
-
- return this;
-}
-
-function undestroy() {
- if (this._readableState) {
- this._readableState.destroyed = false;
- this._readableState.reading = false;
- this._readableState.ended = false;
- this._readableState.endEmitted = false;
- }
-
- if (this._writableState) {
- this._writableState.destroyed = false;
- this._writableState.ended = false;
- this._writableState.ending = false;
- this._writableState.finished = false;
- this._writableState.errorEmitted = false;
- }
-}
-
-function emitErrorNT(self, err) {
- self.emit('error', err);
-}
-
-module.exports = {
- destroy: destroy,
- undestroy: undestroy
-};
\ No newline at end of file
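
(Editorial note; not part of the patch.) The `destroy()` helper removed above flips the `destroyed` flags synchronously and then delegates to `_destroy()`; when no callback is supplied, any error surfaces as an `'error'` event on the next tick. A minimal sketch, assuming `readable-stream@2` is installed:

```js
var Readable = require('readable-stream').Readable;

var r = new Readable({ read: function () {} });
r.on('error', function (err) {
  console.log('error event:', err.message); // 'boom', emitted on the following tick
});

r.destroy(new Error('boom'));
console.log('destroyed flag:', r.destroyed); // true, set synchronously
```
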
diff --git a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/stream-browser.js b/node_modules/through2/node_modules/readable-stream/lib/internal/streams/stream-browser.js
deleted file mode 100644
index 9332a3fdae7060505c0a081614e697fa6cb56dc0..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/stream-browser.js
+++ /dev/null
@@ -1 +0,0 @@
-module.exports = require('events').EventEmitter;
diff --git a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/stream.js b/node_modules/through2/node_modules/readable-stream/lib/internal/streams/stream.js
deleted file mode 100644
index ce2ad5b6ee57f4778a1f4838f7970093c7941c1c..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/lib/internal/streams/stream.js
+++ /dev/null
@@ -1 +0,0 @@
-module.exports = require('stream');
diff --git a/node_modules/through2/node_modules/readable-stream/package.json b/node_modules/through2/node_modules/readable-stream/package.json
deleted file mode 100644
index b9ff86704ef1b35bbb457556a0dbdfafa48b6723..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/package.json
+++ /dev/null
@@ -1,152 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "readable-stream",
- "raw": "readable-stream@^2.2.2",
- "rawSpec": "^2.2.2",
- "scope": null,
- "spec": ">=2.2.2 <3.0.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/concat-stream"
- ],
- [
- {
- "name": "readable-stream",
- "raw": "readable-stream@~2.3.6",
- "rawSpec": "~2.3.6",
- "scope": null,
- "spec": ">=2.3.6 <2.4.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2"
- ]
- ],
- "_from": "readable-stream@~2.3.6",
- "_hasShrinkwrap": false,
- "_id": "readable-stream@2.3.7",
- "_inCache": true,
- "_installable": true,
- "_location": "/through2/readable-stream",
- "_nodeVersion": "10.17.0",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/readable-stream_2.3.7_1578244418937_0.3738956004243521"
- },
- "_npmUser": {
- "email": "hello@matteocollina.com",
- "name": "matteo.collina"
- },
- "_npmVersion": "6.13.4",
- "_phantomChildren": {},
- "_requested": {
- "name": "readable-stream",
- "raw": "readable-stream@~2.3.6",
- "rawSpec": "~2.3.6",
- "scope": null,
- "spec": ">=2.3.6 <2.4.0",
- "type": "range"
- },
- "_requiredBy": [
- "/through2"
- ],
- "_resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
- "_shasum": "1eca1cf711aef814c04f62252a36a62f6cb23b57",
- "_shrinkwrap": null,
- "_spec": "readable-stream@~2.3.6",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2",
- "browser": {
- "./duplex.js": "./duplex-browser.js",
- "./lib/internal/streams/stream.js": "./lib/internal/streams/stream-browser.js",
- "./readable.js": "./readable-browser.js",
- "./writable.js": "./writable-browser.js",
- "util": false
- },
- "bugs": {
- "url": "https://github.com/nodejs/readable-stream/issues"
- },
- "dependencies": {
- "core-util-is": "~1.0.0",
- "inherits": "~2.0.3",
- "isarray": "~1.0.0",
- "process-nextick-args": "~2.0.0",
- "safe-buffer": "~5.1.1",
- "string_decoder": "~1.1.1",
- "util-deprecate": "~1.0.1"
- },
- "description": "Streams3, a user-land copy of the stream library from Node.js",
- "devDependencies": {
- "assert": "^1.4.0",
- "babel-polyfill": "^6.9.1",
- "buffer": "^4.9.0",
- "lolex": "^2.3.2",
- "nyc": "^6.4.0",
- "tap": "^0.7.0",
- "tape": "^4.8.0"
- },
- "directories": {},
- "dist": {
- "fileCount": 24,
- "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
- "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJeEhlDCRA9TVsSAnZWagAAUqkP/33VFphjNq5vk5/8TqHZ\n6T6ItDSwoY1VyDEuN26uFHRBZRXztM5BC5spq5q8h/KcQiVxshlTjL+PoXI9\nFgXB1kkm57NY95FhCYcVlDw4QmBl1IRlzVWglN/0J2YQ+wSpYiXLXTcWSAp9\nPt5Abrb68H22oI8+KqQ+T9WTFyDG75yTJHFWBQyXcYG98OU/v0bk3nnB/Oh6\nztpy2pD5EayiEY81YTJ6vySHIKYxAySWdesIO9gQKwaqDdQRmfvsaEra90vf\n5St+XpRx1M0oPFCBjjj6aEhRgvq5rRm81GUQOKFvIWZacFFgdwpqJn1kzCrM\n6tQKONWApacjucn2FeAOwPno1HRQ3/T4NzWVwUcc4fuwi6fXa1E0zcalRSws\nlfuL+mtQkxv7fPanzqZu8J542hgss81ZahaHR1p/d705GTkdiZV3CYBN2O/a\nQDSCgjPUAQpyfOUlkSsNnB8GNWGU3hVOabsSpH3BPrGlMdBm4ML9ZiSaSn1c\nqb/EldTv0pGkwfx4+uOxH8QXMZLwd7nKMluEoBZQtFeAEIiaFwqgV1rB9Pi9\ncBPAMrDIlwlHV9CxOHiH/WsVBb0Uc/vBeRerEpgaEP3iglkkF3w4kaD3PRhG\n9GTiyJcjpDQugMRZehfcQODpj5mlYpYgSxK1xkRQ137BTZ4Px4T+lgYM16Nl\nQ1gV\r\n=Ka0O\r\n-----END PGP SIGNATURE-----\r\n",
- "shasum": "1eca1cf711aef814c04f62252a36a62f6cb23b57",
- "tarball": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
- "unpackedSize": 87719
- },
- "gitHead": "2eb8861e029107b7e079e22c826835cad6ae7854",
- "homepage": "https://github.com/nodejs/readable-stream#readme",
- "keywords": [
- "readable",
- "stream",
- "pipe"
- ],
- "license": "MIT",
- "main": "readable.js",
- "maintainers": [
- {
- "email": "calvin.metcalf@gmail.com",
- "name": "cwmma"
- },
- {
- "email": "i@izs.me",
- "name": "isaacs"
- },
- {
- "email": "hello@matteocollina.com",
- "name": "matteo.collina"
- },
- {
- "email": "build@iojs.org",
- "name": "nodejs-foundation"
- },
- {
- "email": "r@va.gg",
- "name": "rvagg"
- },
- {
- "email": "nathan@tootallnate.net",
- "name": "tootallnate"
- }
- ],
- "name": "readable-stream",
- "nyc": {
- "include": [
- "lib/**.js"
- ]
- },
- "optionalDependencies": {},
- "readme": "# readable-stream\n\n***Node-core v8.11.1 streams for userland*** [![Build Status](https://travis-ci.org/nodejs/readable-stream.svg?branch=master)](https://travis-ci.org/nodejs/readable-stream)\n\n\n[![NPM](https://nodei.co/npm/readable-stream.png?downloads=true&downloadRank=true)](https://nodei.co/npm/readable-stream/)\n[![NPM](https://nodei.co/npm-dl/readable-stream.png?&months=6&height=3)](https://nodei.co/npm/readable-stream/)\n\n\n[![Sauce Test Status](https://saucelabs.com/browser-matrix/readable-stream.svg)](https://saucelabs.com/u/readable-stream)\n\n```bash\nnpm install --save readable-stream\n```\n\n***Node-core streams for userland***\n\nThis package is a mirror of the Streams2 and Streams3 implementations in\nNode-core.\n\nFull documentation may be found on the [Node.js website](https://nodejs.org/dist/v8.11.1/docs/api/stream.html).\n\nIf you want to guarantee a stable streams base, regardless of what version of\nNode you, or the users of your libraries are using, use **readable-stream** *only* and avoid the *\"stream\"* module in Node-core, for background see [this blogpost](http://r.va.gg/2014/06/why-i-dont-use-nodes-core-stream-module.html).\n\nAs of version 2.0.0 **readable-stream** uses semantic versioning.\n\n# Streams Working Group\n\n`readable-stream` is maintained by the Streams Working Group, which\noversees the development and maintenance of the Streams API within\nNode.js. The responsibilities of the Streams Working Group include:\n\n* Addressing stream issues on the Node.js issue tracker.\n* Authoring and editing stream documentation within the Node.js project.\n* Reviewing changes to stream subclasses within the Node.js project.\n* Redirecting changes to streams from the Node.js project to this\n project.\n* Assisting in the implementation of stream providers within Node.js.\n* Recommending versions of `readable-stream` to be included in Node.js.\n* Messaging about the future of streams to give the community advance\n notice of changes.\n\n\n## Team Members\n\n* **Chris Dickinson** ([@chrisdickinson](https://github.com/chrisdickinson)) <christopher.s.dickinson@gmail.com>\n - Release GPG key: 9554F04D7259F04124DE6B476D5A82AC7E37093B\n* **Calvin Metcalf** ([@calvinmetcalf](https://github.com/calvinmetcalf)) <calvin.metcalf@gmail.com>\n - Release GPG key: F3EF5F62A87FC27A22E643F714CE4FF5015AA242\n* **Rod Vagg** ([@rvagg](https://github.com/rvagg)) <rod@vagg.org>\n - Release GPG key: DD8F2338BAE7501E3DD5AC78C273792F7D83545D\n* **Sam Newman** ([@sonewman](https://github.com/sonewman)) <newmansam@outlook.com>\n* **Mathias Buus** ([@mafintosh](https://github.com/mafintosh)) <mathiasbuus@gmail.com>\n* **Domenic Denicola** ([@domenic](https://github.com/domenic)) <d@domenic.me>\n* **Matteo Collina** ([@mcollina](https://github.com/mcollina)) <matteo.collina@gmail.com>\n - Release GPG key: 3ABC01543F22DD2239285CDD818674489FBC127E\n* **Irina Shestak** ([@lrlna](https://github.com/lrlna)) <shestak.irina@gmail.com>\n",
- "readmeFilename": "README.md",
- "repository": {
- "type": "git",
- "url": "git://github.com/nodejs/readable-stream.git"
- },
- "scripts": {
- "ci": "tap test/parallel/*.js test/ours/*.js --tap | tee test.tap && node test/verify-dependencies.js",
- "cover": "nyc npm test",
- "report": "nyc report --reporter=lcov",
- "test": "tap test/parallel/*.js test/ours/*.js && node test/verify-dependencies.js"
- },
- "version": "2.3.7"
-}
diff --git a/node_modules/through2/node_modules/readable-stream/passthrough.js b/node_modules/through2/node_modules/readable-stream/passthrough.js
deleted file mode 100644
index ffd791d7ff275a58d537ea89153175a23edee5fb..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/passthrough.js
+++ /dev/null
@@ -1 +0,0 @@
-module.exports = require('./readable').PassThrough
diff --git a/node_modules/through2/node_modules/readable-stream/readable-browser.js b/node_modules/through2/node_modules/readable-stream/readable-browser.js
deleted file mode 100644
index e50372592ee6c63a7fc43cb912dd9639e3fa7eb1..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/readable-browser.js
+++ /dev/null
@@ -1,7 +0,0 @@
-exports = module.exports = require('./lib/_stream_readable.js');
-exports.Stream = exports;
-exports.Readable = exports;
-exports.Writable = require('./lib/_stream_writable.js');
-exports.Duplex = require('./lib/_stream_duplex.js');
-exports.Transform = require('./lib/_stream_transform.js');
-exports.PassThrough = require('./lib/_stream_passthrough.js');
diff --git a/node_modules/through2/node_modules/readable-stream/readable.js b/node_modules/through2/node_modules/readable-stream/readable.js
deleted file mode 100644
index ec89ec53306497adae0014c4a8aba6d51d1aff6c..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/readable.js
+++ /dev/null
@@ -1,19 +0,0 @@
-var Stream = require('stream');
-if (process.env.READABLE_STREAM === 'disable' && Stream) {
- module.exports = Stream;
- exports = module.exports = Stream.Readable;
- exports.Readable = Stream.Readable;
- exports.Writable = Stream.Writable;
- exports.Duplex = Stream.Duplex;
- exports.Transform = Stream.Transform;
- exports.PassThrough = Stream.PassThrough;
- exports.Stream = Stream;
-} else {
- exports = module.exports = require('./lib/_stream_readable.js');
- exports.Stream = Stream || exports;
- exports.Readable = exports;
- exports.Writable = require('./lib/_stream_writable.js');
- exports.Duplex = require('./lib/_stream_duplex.js');
- exports.Transform = require('./lib/_stream_transform.js');
- exports.PassThrough = require('./lib/_stream_passthrough.js');
-}
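
(Editorial note; not part of the patch.) The deleted `readable.js` entry point lets consumers opt back into Node's core streams by setting the `READABLE_STREAM=disable` environment variable; otherwise it exports the bundled userland implementation. A quick way to check which path was taken:

```js
// READABLE_STREAM=disable node app.js  -> uses core streams
// node app.js                          -> uses readable-stream's own copy
var stream = require('readable-stream');
console.log('using core streams:', stream === require('stream'));
```
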
diff --git a/node_modules/through2/node_modules/readable-stream/transform.js b/node_modules/through2/node_modules/readable-stream/transform.js
deleted file mode 100644
index b1baba26da03dc8bbc5d9da33cd55f3f88c99115..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/transform.js
+++ /dev/null
@@ -1 +0,0 @@
-module.exports = require('./readable').Transform
diff --git a/node_modules/through2/node_modules/readable-stream/writable-browser.js b/node_modules/through2/node_modules/readable-stream/writable-browser.js
deleted file mode 100644
index ebdde6a85dcb19bfdbfc2ec2e34b13a54c0e5bf0..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/writable-browser.js
+++ /dev/null
@@ -1 +0,0 @@
-module.exports = require('./lib/_stream_writable.js');
diff --git a/node_modules/through2/node_modules/readable-stream/writable.js b/node_modules/through2/node_modules/readable-stream/writable.js
deleted file mode 100644
index 3211a6f80d1abc9db7099cd1e8fa200ad2ccfdbe..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/readable-stream/writable.js
+++ /dev/null
@@ -1,8 +0,0 @@
-var Stream = require("stream")
-var Writable = require("./lib/_stream_writable.js")
-
-if (process.env.READABLE_STREAM === 'disable') {
- module.exports = Stream && Stream.Writable || Writable
-} else {
- module.exports = Writable
-}
diff --git a/node_modules/through2/node_modules/safe-buffer/LICENSE b/node_modules/through2/node_modules/safe-buffer/LICENSE
deleted file mode 100644
index 0c068ceecbd48fc4e8279e6451793fec2bf12178..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/safe-buffer/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) Feross Aboukhadijeh
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/node_modules/through2/node_modules/safe-buffer/README.md b/node_modules/through2/node_modules/safe-buffer/README.md
deleted file mode 100644
index e9a81afd0406f030ba21169f0c7a1dba70b3a93b..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/safe-buffer/README.md
+++ /dev/null
@@ -1,584 +0,0 @@
-# safe-buffer [![travis][travis-image]][travis-url] [![npm][npm-image]][npm-url] [![downloads][downloads-image]][downloads-url] [![javascript style guide][standard-image]][standard-url]
-
-[travis-image]: https://img.shields.io/travis/feross/safe-buffer/master.svg
-[travis-url]: https://travis-ci.org/feross/safe-buffer
-[npm-image]: https://img.shields.io/npm/v/safe-buffer.svg
-[npm-url]: https://npmjs.org/package/safe-buffer
-[downloads-image]: https://img.shields.io/npm/dm/safe-buffer.svg
-[downloads-url]: https://npmjs.org/package/safe-buffer
-[standard-image]: https://img.shields.io/badge/code_style-standard-brightgreen.svg
-[standard-url]: https://standardjs.com
-
-#### Safer Node.js Buffer API
-
-**Use the new Node.js Buffer APIs (`Buffer.from`, `Buffer.alloc`,
-`Buffer.allocUnsafe`, `Buffer.allocUnsafeSlow`) in all versions of Node.js.**
-
-**Uses the built-in implementation when available.**
-
-## install
-
-```
-npm install safe-buffer
-```
-
-## usage
-
-The goal of this package is to provide a safe replacement for the node.js `Buffer`.
-
-It's a drop-in replacement for `Buffer`. You can use it by adding one `require` line to
-the top of your node.js modules:
-
-```js
-var Buffer = require('safe-buffer').Buffer
-
-// Existing buffer code will continue to work without issues:
-
-new Buffer('hey', 'utf8')
-new Buffer([1, 2, 3], 'utf8')
-new Buffer(obj)
-new Buffer(16) // create an uninitialized buffer (potentially unsafe)
-
-// But you can use these new explicit APIs to make clear what you want:
-
-Buffer.from('hey', 'utf8') // convert from many types to a Buffer
-Buffer.alloc(16) // create a zero-filled buffer (safe)
-Buffer.allocUnsafe(16) // create an uninitialized buffer (potentially unsafe)
-```
-
-## api
-
-### Class Method: Buffer.from(array)
-
-
-* `array` {Array}
-
-Allocates a new `Buffer` using an `array` of octets.
-
-```js
-const buf = Buffer.from([0x62,0x75,0x66,0x66,0x65,0x72]);
- // creates a new Buffer containing ASCII bytes
- // ['b','u','f','f','e','r']
-```
-
-A `TypeError` will be thrown if `array` is not an `Array`.
-
-### Class Method: Buffer.from(arrayBuffer[, byteOffset[, length]])
-
-
-* `arrayBuffer` {ArrayBuffer} The `.buffer` property of a `TypedArray` or
- a `new ArrayBuffer()`
-* `byteOffset` {Number} Default: `0`
-* `length` {Number} Default: `arrayBuffer.length - byteOffset`
-
-When passed a reference to the `.buffer` property of a `TypedArray` instance,
-the newly created `Buffer` will share the same allocated memory as the
-TypedArray.
-
-```js
-const arr = new Uint16Array(2);
-arr[0] = 5000;
-arr[1] = 4000;
-
-const buf = Buffer.from(arr.buffer); // shares the memory with arr;
-
-console.log(buf);
- // Prints: <Buffer 88 13 a0 0f>
-
-// changing the TypedArray changes the Buffer also
-arr[1] = 6000;
-
-console.log(buf);
- // Prints: <Buffer 88 13 70 17>
-```
-
-The optional `byteOffset` and `length` arguments specify a memory range within
-the `arrayBuffer` that will be shared by the `Buffer`.
-
-```js
-const ab = new ArrayBuffer(10);
-const buf = Buffer.from(ab, 0, 2);
-console.log(buf.length);
- // Prints: 2
-```
-
-A `TypeError` will be thrown if `arrayBuffer` is not an `ArrayBuffer`.
-
-### Class Method: Buffer.from(buffer)
-
-
-* `buffer` {Buffer}
-
-Copies the passed `buffer` data onto a new `Buffer` instance.
-
-```js
-const buf1 = Buffer.from('buffer');
-const buf2 = Buffer.from(buf1);
-
-buf1[0] = 0x61;
-console.log(buf1.toString());
- // 'auffer'
-console.log(buf2.toString());
- // 'buffer' (copy is not changed)
-```
-
-A `TypeError` will be thrown if `buffer` is not a `Buffer`.
-
-### Class Method: Buffer.from(str[, encoding])
-
-
-* `str` {String} String to encode.
-* `encoding` {String} Encoding to use, Default: `'utf8'`
-
-Creates a new `Buffer` containing the given JavaScript string `str`. If
-provided, the `encoding` parameter identifies the character encoding.
-If not provided, `encoding` defaults to `'utf8'`.
-
-```js
-const buf1 = Buffer.from('this is a tést');
-console.log(buf1.toString());
- // prints: this is a tést
-console.log(buf1.toString('ascii'));
- // prints: this is a tC)st
-
-const buf2 = Buffer.from('7468697320697320612074c3a97374', 'hex');
-console.log(buf2.toString());
- // prints: this is a tést
-```
-
-A `TypeError` will be thrown if `str` is not a string.
-
-### Class Method: Buffer.alloc(size[, fill[, encoding]])
-
-
-* `size` {Number}
-* `fill` {Value} Default: `undefined`
-* `encoding` {String} Default: `utf8`
-
-Allocates a new `Buffer` of `size` bytes. If `fill` is `undefined`, the
-`Buffer` will be *zero-filled*.
-
-```js
-const buf = Buffer.alloc(5);
-console.log(buf);
- // <Buffer 00 00 00 00 00>
-```
-
-The `size` must be less than or equal to the value of
-`require('buffer').kMaxLength` (on 64-bit architectures, `kMaxLength` is
-`(2^31)-1`). Otherwise, a [`RangeError`][] is thrown. A zero-length Buffer will
-be created if a `size` less than or equal to 0 is specified.
-
-If `fill` is specified, the allocated `Buffer` will be initialized by calling
-`buf.fill(fill)`. See [`buf.fill()`][] for more information.
-
-```js
-const buf = Buffer.alloc(5, 'a');
-console.log(buf);
- // <Buffer 61 61 61 61 61>
-```
-
-If both `fill` and `encoding` are specified, the allocated `Buffer` will be
-initialized by calling `buf.fill(fill, encoding)`. For example:
-
-```js
-const buf = Buffer.alloc(11, 'aGVsbG8gd29ybGQ=', 'base64');
-console.log(buf);
- // <Buffer 68 65 6c 6c 6f 20 77 6f 72 6c 64>
-```
-
-Calling `Buffer.alloc(size)` can be significantly slower than the alternative
-`Buffer.allocUnsafe(size)` but ensures that the newly created `Buffer` instance
-contents will *never contain sensitive data*.
-
-A `TypeError` will be thrown if `size` is not a number.
-
-### Class Method: Buffer.allocUnsafe(size)
-
-
-* `size` {Number}
-
-Allocates a new *non-zero-filled* `Buffer` of `size` bytes. The `size` must
-be less than or equal to the value of `require('buffer').kMaxLength` (on 64-bit
-architectures, `kMaxLength` is `(2^31)-1`). Otherwise, a [`RangeError`][] is
-thrown. A zero-length Buffer will be created if a `size` less than or equal to
-0 is specified.
-
-The underlying memory for `Buffer` instances created in this way is *not
-initialized*. The contents of the newly created `Buffer` are unknown and
-*may contain sensitive data*. Use [`buf.fill(0)`][] to initialize such
-`Buffer` instances to zeroes.
-
-```js
-const buf = Buffer.allocUnsafe(5);
-console.log(buf);
- // <Buffer 78 e0 82 02 01>
- // (octets will be different, every time)
-buf.fill(0);
-console.log(buf);
- // <Buffer 00 00 00 00 00>
-```
-
-A `TypeError` will be thrown if `size` is not a number.
-
-Note that the `Buffer` module pre-allocates an internal `Buffer` instance of
-size `Buffer.poolSize` that is used as a pool for the fast allocation of new
-`Buffer` instances created using `Buffer.allocUnsafe(size)` (and the deprecated
-`new Buffer(size)` constructor) only when `size` is less than or equal to
-`Buffer.poolSize >> 1` (floor of `Buffer.poolSize` divided by two). The default
-value of `Buffer.poolSize` is `8192` but can be modified.
-
-Use of this pre-allocated internal memory pool is a key difference between
-calling `Buffer.alloc(size, fill)` vs. `Buffer.allocUnsafe(size).fill(fill)`.
-Specifically, `Buffer.alloc(size, fill)` will *never* use the internal Buffer
-pool, while `Buffer.allocUnsafe(size).fill(fill)` *will* use the internal
-Buffer pool if `size` is less than or equal to half `Buffer.poolSize`. The
-difference is subtle but can be important when an application requires the
-additional performance that `Buffer.allocUnsafe(size)` provides.
-
-### Class Method: Buffer.allocUnsafeSlow(size)
-
-
-* `size` {Number}
-
-Allocates a new *non-zero-filled* and non-pooled `Buffer` of `size` bytes. The
-`size` must be less than or equal to the value of
-`require('buffer').kMaxLength` (on 64-bit architectures, `kMaxLength` is
-`(2^31)-1`). Otherwise, a [`RangeError`][] is thrown. A zero-length Buffer will
-be created if a `size` less than or equal to 0 is specified.
-
-The underlying memory for `Buffer` instances created in this way is *not
-initialized*. The contents of the newly created `Buffer` are unknown and
-*may contain sensitive data*. Use [`buf.fill(0)`][] to initialize such
-`Buffer` instances to zeroes.
-
-When using `Buffer.allocUnsafe()` to allocate new `Buffer` instances,
-allocations under 4KB are, by default, sliced from a single pre-allocated
-`Buffer`. This allows applications to avoid the garbage collection overhead of
-creating many individually allocated Buffers. This approach improves both
-performance and memory usage by eliminating the need to track and cleanup as
-many `Persistent` objects.
-
-However, in the case where a developer may need to retain a small chunk of
-memory from a pool for an indeterminate amount of time, it may be appropriate
-to create an un-pooled Buffer instance using `Buffer.allocUnsafeSlow()` then
-copy out the relevant bits.
-
-```js
-// need to keep around a few small chunks of memory
-const store = [];
-
-socket.on('readable', () => {
- const data = socket.read();
- // allocate for retained data
- const sb = Buffer.allocUnsafeSlow(10);
- // copy the data into the new allocation
- data.copy(sb, 0, 0, 10);
- store.push(sb);
-});
-```
-
-`Buffer.allocUnsafeSlow()` should be used only as a last resort *after*
-a developer has observed undue memory retention in their applications.
-
-A `TypeError` will be thrown if `size` is not a number.
-
-### All the Rest
-
-The rest of the `Buffer` API is exactly the same as in node.js.
-[See the docs](https://nodejs.org/api/buffer.html).
-
-
-## Related links
-
-- [Node.js issue: Buffer(number) is unsafe](https://github.com/nodejs/node/issues/4660)
-- [Node.js Enhancement Proposal: Buffer.from/Buffer.alloc/Buffer.zalloc/Buffer() soft-deprecate](https://github.com/nodejs/node-eps/pull/4)
-
-## Why is `Buffer` unsafe?
-
-Today, the node.js `Buffer` constructor is overloaded to handle many different argument
-types like `String`, `Array`, `Object`, `TypedArrayView` (`Uint8Array`, etc.),
-`ArrayBuffer`, and also `Number`.
-
-The API is optimized for convenience: you can throw any type at it, and it will try to do
-what you want.
-
-Because the Buffer constructor is so powerful, you often see code like this:
-
-```js
-// Convert UTF-8 strings to hex
-function toHex (str) {
- return new Buffer(str).toString('hex')
-}
-```
-
-***But what happens if `toHex` is called with a `Number` argument?***
-
-### Remote Memory Disclosure
-
-If an attacker can make your program call the `Buffer` constructor with a `Number`
-argument, then they can make it allocate uninitialized memory from the node.js process.
-This could potentially disclose TLS private keys, user data, or database passwords.
-
-When the `Buffer` constructor is passed a `Number` argument, it returns an
-**UNINITIALIZED** block of memory of the specified `size`. When you create a `Buffer` like
-this, you **MUST** overwrite the contents before returning it to the user.
-
-From the [node.js docs](https://nodejs.org/api/buffer.html#buffer_new_buffer_size):
-
-> `new Buffer(size)`
->
-> - `size` Number
->
-> The underlying memory for `Buffer` instances created in this way is not initialized.
-> **The contents of a newly created `Buffer` are unknown and could contain sensitive
-> data.** Use `buf.fill(0)` to initialize a Buffer to zeroes.
-
-(Emphasis our own.)
-
-Whenever the programmer intended to create an uninitialized `Buffer`, you often see code
-like this:
-
-```js
-var buf = new Buffer(16)
-
-// Immediately overwrite the uninitialized buffer with data from another buffer
-for (var i = 0; i < buf.length; i++) {
- buf[i] = otherBuf[i]
-}
-```
-
-
-### Would this ever be a problem in real code?
-
-Yes. It's surprisingly common to forget to check the type of your variables in a
-dynamically-typed language like JavaScript.
-
-Usually the consequence of assuming the wrong type is that your program crashes with an
-uncaught exception. But the failure mode for forgetting to check the type of arguments to
-the `Buffer` constructor is more catastrophic.
-
-Here's an example of a vulnerable service that takes a JSON payload and converts it to
-hex:
-
-```js
-// Take a JSON payload {str: "some string"} and convert it to hex
-var server = http.createServer(function (req, res) {
- var data = ''
- req.setEncoding('utf8')
- req.on('data', function (chunk) {
- data += chunk
- })
- req.on('end', function () {
- var body = JSON.parse(data)
- res.end(new Buffer(body.str).toString('hex'))
- })
-})
-
-server.listen(8080)
-```
-
-In this example, an http client just has to send:
-
-```json
-{
- "str": 1000
-}
-```
-
-and it will get back 1,000 bytes of uninitialized memory from the server.
-
-This is a very serious bug. It's similar in severity to the
-[the Heartbleed bug](http://heartbleed.com/) that allowed disclosure of OpenSSL process
-memory by remote attackers.
-
-
-### Which real-world packages were vulnerable?
-
-#### [`bittorrent-dht`](https://www.npmjs.com/package/bittorrent-dht)
-
-[Mathias Buus](https://github.com/mafintosh) and I
-([Feross Aboukhadijeh](http://feross.org/)) found this issue in one of our own packages,
-[`bittorrent-dht`](https://www.npmjs.com/package/bittorrent-dht). The bug would allow
-anyone on the internet to send a series of messages to a user of `bittorrent-dht` and get
-them to reveal 20 bytes at a time of uninitialized memory from the node.js process.
-
-Here's
-[the commit](https://github.com/feross/bittorrent-dht/commit/6c7da04025d5633699800a99ec3fbadf70ad35b8)
-that fixed it. We released a new fixed version, created a
-[Node Security Project disclosure](https://nodesecurity.io/advisories/68), and deprecated all
-vulnerable versions on npm so users will get a warning to upgrade to a newer version.
-
-#### [`ws`](https://www.npmjs.com/package/ws)
-
-That got us wondering if there were other vulnerable packages. Sure enough, within a short
-period of time, we found the same issue in [`ws`](https://www.npmjs.com/package/ws), the
-most popular WebSocket implementation in node.js.
-
-If certain APIs were called with `Number` parameters instead of `String` or `Buffer` as
-expected, then uninitialized server memory would be disclosed to the remote peer.
-
-These were the vulnerable methods:
-
-```js
-socket.send(number)
-socket.ping(number)
-socket.pong(number)
-```
-
-Here's a vulnerable socket server with some echo functionality:
-
-```js
-server.on('connection', function (socket) {
- socket.on('message', function (message) {
- message = JSON.parse(message)
- if (message.type === 'echo') {
- socket.send(message.data) // send back the user's message
- }
- })
-})
-```
-
-Calling `socket.send(number)` on the server will disclose server memory.
-
-Here's [the release](https://github.com/websockets/ws/releases/tag/1.0.1) where the issue
-was fixed, with a more detailed explanation. Props to
-[Arnout Kazemier](https://github.com/3rd-Eden) for the quick fix. Here's the
-[Node Security Project disclosure](https://nodesecurity.io/advisories/67).
-
-
-### What's the solution?
-
-It's important that node.js offers a fast way to get memory; otherwise, performance-critical
-applications would needlessly get a lot slower.
-
-But we need a better way to *signal our intent* as programmers. **When we want
-uninitialized memory, we should request it explicitly.**
-
-Sensitive functionality should not be packed into a developer-friendly API that loosely
-accepts many different types. This type of API encourages the lazy practice of passing
-variables in without checking the type very carefully.
-
-#### A new API: `Buffer.allocUnsafe(number)`
-
-The functionality of creating buffers with uninitialized memory should be part of another
-API. We propose `Buffer.allocUnsafe(number)`. This way, it's not part of an API that
-frequently gets user input of all sorts of different types passed into it.
-
-```js
-var buf = Buffer.allocUnsafe(16) // careful, uninitialized memory!
-
-// Immediately overwrite the uninitialized buffer with data from another buffer
-for (var i = 0; i < buf.length; i++) {
- buf[i] = otherBuf[i]
-}
-```
-
-
-### How do we fix node.js core?
-
-We sent [a PR to node.js core](https://github.com/nodejs/node/pull/4514) (merged as
-`semver-major`) which defends against one case:
-
-```js
-var str = 16
-new Buffer(str, 'utf8')
-```
-
-In this situation, it's implied that the programmer intended the first argument to be a
-string, since they passed an encoding as a second argument. Today, node.js will allocate
-uninitialized memory in the case of `new Buffer(number, encoding)`, which is probably not
-what the programmer intended.
-
-But this is only a partial solution, since if the programmer does `new Buffer(variable)`
-(without an `encoding` parameter) there's no way to know what they intended. If `variable`
-is sometimes a number, then uninitialized memory will sometimes be returned.
-
-### What's the real long-term fix?
-
-We could deprecate and remove `new Buffer(number)` and use `Buffer.allocUnsafe(number)` when
-we need uninitialized memory. But that would break 1000s of packages.
-
-~~We believe the best solution is to:~~
-
-~~1. Change `new Buffer(number)` to return safe, zeroed-out memory~~
-
-~~2. Create a new API for creating uninitialized Buffers. We propose: `Buffer.allocUnsafe(number)`~~
-
-#### Update
-
-We now support adding three new APIs:
-
-- `Buffer.from(value)` - convert from any type to a buffer
-- `Buffer.alloc(size)` - create a zero-filled buffer
-- `Buffer.allocUnsafe(size)` - create an uninitialized buffer with given size
-
-This solves the core problem that affected `ws` and `bittorrent-dht` which is
-`Buffer(variable)` getting tricked into taking a number argument.
-
-This way, existing code continues working and the impact on the npm ecosystem will be
-minimal. Over time, npm maintainers can migrate performance-critical code to use
-`Buffer.allocUnsafe(number)` instead of `new Buffer(number)`.
-
-
-### Conclusion
-
-We think there's a serious design issue with the `Buffer` API as it exists today. It
-promotes insecure software by putting high-risk functionality into a convenient API
-with friendly "developer ergonomics".
-
-This wasn't merely a theoretical exercise because we found the issue in some of the
-most popular npm packages.
-
-Fortunately, there's an easy fix that can be applied today. Use `safe-buffer` in place of
-`buffer`.
-
-```js
-var Buffer = require('safe-buffer').Buffer
-```
-
-Eventually, we hope that node.js core can switch to this new, safer behavior. We believe
-the impact on the ecosystem would be minimal since it's not a breaking change.
-Well-maintained, popular packages would be updated to use `Buffer.alloc` quickly, while
-older, insecure packages would magically become safe from this attack vector.
-
-
-## links
-
-- [Node.js PR: buffer: throw if both length and enc are passed](https://github.com/nodejs/node/pull/4514)
-- [Node Security Project disclosure for `ws`](https://nodesecurity.io/advisories/67)
-- [Node Security Project disclosure for `bittorrent-dht`](https://nodesecurity.io/advisories/68)
-
-
-## credit
-
-The original issues in `bittorrent-dht`
-([disclosure](https://nodesecurity.io/advisories/68)) and
-`ws` ([disclosure](https://nodesecurity.io/advisories/67)) were discovered by
-[Mathias Buus](https://github.com/mafintosh) and
-[Feross Aboukhadijeh](http://feross.org/).
-
-Thanks to [Adam Baldwin](https://github.com/evilpacket) for helping disclose these issues
-and for his work running the [Node Security Project](https://nodesecurity.io/).
-
-Thanks to [John Hiesey](https://github.com/jhiesey) for proofreading this README and
-auditing the code.
-
-
-## license
-
-MIT. Copyright (C) [Feross Aboukhadijeh](http://feross.org)
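
(Editorial note; not part of the patch.) To tie the README above back to its own `toHex` example: with `safe-buffer`, handing the function a number fails loudly instead of leaking uninitialized memory. A minimal sketch, assuming `safe-buffer` is installed:

```js
var Buffer = require('safe-buffer').Buffer;

function toHex (str) {
  return Buffer.from(str).toString('hex'); // throws a TypeError if str is a number
}

console.log(toHex('some string')); // '736f6d6520737472696e67'

try {
  toHex(1000); // the attacker-controlled number from the README's example
} catch (err) {
  console.log(err instanceof TypeError); // true: rejected instead of allocating memory
}
```
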
diff --git a/node_modules/through2/node_modules/safe-buffer/index.d.ts b/node_modules/through2/node_modules/safe-buffer/index.d.ts
deleted file mode 100644
index e9fed809a5ab515658d6e71f7ba5f631be769be4..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/safe-buffer/index.d.ts
+++ /dev/null
@@ -1,187 +0,0 @@
-declare module "safe-buffer" {
- export class Buffer {
- length: number
- write(string: string, offset?: number, length?: number, encoding?: string): number;
- toString(encoding?: string, start?: number, end?: number): string;
- toJSON(): { type: 'Buffer', data: any[] };
- equals(otherBuffer: Buffer): boolean;
- compare(otherBuffer: Buffer, targetStart?: number, targetEnd?: number, sourceStart?: number, sourceEnd?: number): number;
- copy(targetBuffer: Buffer, targetStart?: number, sourceStart?: number, sourceEnd?: number): number;
- slice(start?: number, end?: number): Buffer;
- writeUIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
- writeUIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
- writeIntLE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
- writeIntBE(value: number, offset: number, byteLength: number, noAssert?: boolean): number;
- readUIntLE(offset: number, byteLength: number, noAssert?: boolean): number;
- readUIntBE(offset: number, byteLength: number, noAssert?: boolean): number;
- readIntLE(offset: number, byteLength: number, noAssert?: boolean): number;
- readIntBE(offset: number, byteLength: number, noAssert?: boolean): number;
- readUInt8(offset: number, noAssert?: boolean): number;
- readUInt16LE(offset: number, noAssert?: boolean): number;
- readUInt16BE(offset: number, noAssert?: boolean): number;
- readUInt32LE(offset: number, noAssert?: boolean): number;
- readUInt32BE(offset: number, noAssert?: boolean): number;
- readInt8(offset: number, noAssert?: boolean): number;
- readInt16LE(offset: number, noAssert?: boolean): number;
- readInt16BE(offset: number, noAssert?: boolean): number;
- readInt32LE(offset: number, noAssert?: boolean): number;
- readInt32BE(offset: number, noAssert?: boolean): number;
- readFloatLE(offset: number, noAssert?: boolean): number;
- readFloatBE(offset: number, noAssert?: boolean): number;
- readDoubleLE(offset: number, noAssert?: boolean): number;
- readDoubleBE(offset: number, noAssert?: boolean): number;
- swap16(): Buffer;
- swap32(): Buffer;
- swap64(): Buffer;
- writeUInt8(value: number, offset: number, noAssert?: boolean): number;
- writeUInt16LE(value: number, offset: number, noAssert?: boolean): number;
- writeUInt16BE(value: number, offset: number, noAssert?: boolean): number;
- writeUInt32LE(value: number, offset: number, noAssert?: boolean): number;
- writeUInt32BE(value: number, offset: number, noAssert?: boolean): number;
- writeInt8(value: number, offset: number, noAssert?: boolean): number;
- writeInt16LE(value: number, offset: number, noAssert?: boolean): number;
- writeInt16BE(value: number, offset: number, noAssert?: boolean): number;
- writeInt32LE(value: number, offset: number, noAssert?: boolean): number;
- writeInt32BE(value: number, offset: number, noAssert?: boolean): number;
- writeFloatLE(value: number, offset: number, noAssert?: boolean): number;
- writeFloatBE(value: number, offset: number, noAssert?: boolean): number;
- writeDoubleLE(value: number, offset: number, noAssert?: boolean): number;
- writeDoubleBE(value: number, offset: number, noAssert?: boolean): number;
- fill(value: any, offset?: number, end?: number): this;
- indexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number;
- lastIndexOf(value: string | number | Buffer, byteOffset?: number, encoding?: string): number;
- includes(value: string | number | Buffer, byteOffset?: number, encoding?: string): boolean;
-
- /**
- * Allocates a new buffer containing the given {str}.
- *
- * @param str String to store in buffer.
- * @param encoding encoding to use, optional. Default is 'utf8'
- */
- constructor (str: string, encoding?: string);
- /**
- * Allocates a new buffer of {size} octets.
- *
- * @param size count of octets to allocate.
- */
- constructor (size: number);
- /**
- * Allocates a new buffer containing the given {array} of octets.
- *
- * @param array The octets to store.
- */
- constructor (array: Uint8Array);
- /**
- * Produces a Buffer backed by the same allocated memory as
- * the given {ArrayBuffer}.
- *
- *
- * @param arrayBuffer The ArrayBuffer with which to share memory.
- */
- constructor (arrayBuffer: ArrayBuffer);
- /**
- * Allocates a new buffer containing the given {array} of octets.
- *
- * @param array The octets to store.
- */
- constructor (array: any[]);
- /**
- * Copies the passed {buffer} data onto a new {Buffer} instance.
- *
- * @param buffer The buffer to copy.
- */
- constructor (buffer: Buffer);
- prototype: Buffer;
- /**
- * Allocates a new Buffer using an {array} of octets.
- *
- * @param array
- */
- static from(array: any[]): Buffer;
- /**
- * When passed a reference to the .buffer property of a TypedArray instance,
- * the newly created Buffer will share the same allocated memory as the TypedArray.
- * The optional {byteOffset} and {length} arguments specify a memory range
- * within the {arrayBuffer} that will be shared by the Buffer.
- *
- * @param arrayBuffer The .buffer property of a TypedArray or a new ArrayBuffer()
- * @param byteOffset
- * @param length
- */
- static from(arrayBuffer: ArrayBuffer, byteOffset?: number, length?: number): Buffer;
- /**
- * Copies the passed {buffer} data onto a new Buffer instance.
- *
- * @param buffer
- */
- static from(buffer: Buffer): Buffer;
- /**
- * Creates a new Buffer containing the given JavaScript string {str}.
- * If provided, the {encoding} parameter identifies the character encoding.
- * If not provided, {encoding} defaults to 'utf8'.
- *
- * @param str
- */
- static from(str: string, encoding?: string): Buffer;
- /**
- * Returns true if {obj} is a Buffer
- *
- * @param obj object to test.
- */
- static isBuffer(obj: any): obj is Buffer;
- /**
- * Returns true if {encoding} is a valid encoding argument.
- * Valid string encodings in Node 0.12: 'ascii'|'utf8'|'utf16le'|'ucs2'(alias of 'utf16le')|'base64'|'binary'(deprecated)|'hex'
- *
- * @param encoding string to test.
- */
- static isEncoding(encoding: string): boolean;
- /**
- * Gives the actual byte length of a string. encoding defaults to 'utf8'.
- * This is not the same as String.prototype.length since that returns the number of characters in a string.
- *
- * @param string string to test.
- * @param encoding encoding used to evaluate (defaults to 'utf8')
- */
- static byteLength(string: string, encoding?: string): number;
- /**
- * Returns a buffer which is the result of concatenating all the buffers in the list together.
- *
- * If the list has no items, or if the totalLength is 0, then it returns a zero-length buffer.
- * If the list has exactly one item, then the first item of the list is returned.
- * If the list has more than one item, then a new Buffer is created.
- *
- * @param list An array of Buffer objects to concatenate
- * @param totalLength Total length of the buffers when concatenated.
- * If totalLength is not provided, it is read from the buffers in the list. However, this adds an additional loop to the function, so it is faster to provide the length explicitly.
- */
- static concat(list: Buffer[], totalLength?: number): Buffer;
- /**
- * The same as buf1.compare(buf2).
- */
- static compare(buf1: Buffer, buf2: Buffer): number;
- /**
- * Allocates a new buffer of {size} octets.
- *
- * @param size count of octets to allocate.
- * @param fill if specified, buffer will be initialized by calling buf.fill(fill).
- * If parameter is omitted, buffer will be filled with zeros.
- * @param encoding encoding used for call to buf.fill while initializing
- */
- static alloc(size: number, fill?: string | Buffer | number, encoding?: string): Buffer;
- /**
- * Allocates a new buffer of {size} octets, leaving memory not initialized, so the contents
- * of the newly created Buffer are unknown and may contain sensitive data.
- *
- * @param size count of octets to allocate
- */
- static allocUnsafe(size: number): Buffer;
- /**
- * Allocates a new non-pooled buffer of {size} octets, leaving memory not initialized, so the contents
- * of the newly created Buffer are unknown and may contain sensitive data.
- *
- * @param size count of octets to allocate
- */
- static allocUnsafeSlow(size: number): Buffer;
- }
-}
\ No newline at end of file
diff --git a/node_modules/through2/node_modules/safe-buffer/index.js b/node_modules/through2/node_modules/safe-buffer/index.js
deleted file mode 100644
index 22438dabbbceef6954a1a7a68038f8c440a90c79..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/safe-buffer/index.js
+++ /dev/null
@@ -1,62 +0,0 @@
-/* eslint-disable node/no-deprecated-api */
-var buffer = require('buffer')
-var Buffer = buffer.Buffer
-
-// alternative to using Object.keys for old browsers
-function copyProps (src, dst) {
- for (var key in src) {
- dst[key] = src[key]
- }
-}
-if (Buffer.from && Buffer.alloc && Buffer.allocUnsafe && Buffer.allocUnsafeSlow) {
- module.exports = buffer
-} else {
- // Copy properties from require('buffer')
- copyProps(buffer, exports)
- exports.Buffer = SafeBuffer
-}
-
-function SafeBuffer (arg, encodingOrOffset, length) {
- return Buffer(arg, encodingOrOffset, length)
-}
-
-// Copy static methods from Buffer
-copyProps(Buffer, SafeBuffer)
-
-SafeBuffer.from = function (arg, encodingOrOffset, length) {
- if (typeof arg === 'number') {
- throw new TypeError('Argument must not be a number')
- }
- return Buffer(arg, encodingOrOffset, length)
-}
-
-SafeBuffer.alloc = function (size, fill, encoding) {
- if (typeof size !== 'number') {
- throw new TypeError('Argument must be a number')
- }
- var buf = Buffer(size)
- if (fill !== undefined) {
- if (typeof encoding === 'string') {
- buf.fill(fill, encoding)
- } else {
- buf.fill(fill)
- }
- } else {
- buf.fill(0)
- }
- return buf
-}
-
-SafeBuffer.allocUnsafe = function (size) {
- if (typeof size !== 'number') {
- throw new TypeError('Argument must be a number')
- }
- return Buffer(size)
-}
-
-SafeBuffer.allocUnsafeSlow = function (size) {
- if (typeof size !== 'number') {
- throw new TypeError('Argument must be a number')
- }
- return buffer.SlowBuffer(size)
-}
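
(Editorial note; not part of the patch.) Whichever branch the shim above takes, re-exporting core `buffer` on newer Node versions or wrapping the legacy constructor on older ones, the four static methods behave the same. A short sketch of the guarantees callers rely on:

```js
var Buffer = require('safe-buffer').Buffer;

console.log(Buffer.alloc(4));        // <Buffer 00 00 00 00>, always zero-filled
console.log(Buffer.alloc(4, 'ab'));  // <Buffer 61 62 61 62>, fill pattern repeated

var unsafe = Buffer.allocUnsafe(4);  // contents are arbitrary until overwritten
unsafe.fill(0);
console.log(unsafe);                 // <Buffer 00 00 00 00>
```
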
diff --git a/node_modules/through2/node_modules/safe-buffer/package.json b/node_modules/through2/node_modules/safe-buffer/package.json
deleted file mode 100644
index f46c38e8ad1d0384279315aebb8b9382863ba128..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/safe-buffer/package.json
+++ /dev/null
@@ -1,118 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "safe-buffer",
- "raw": "safe-buffer@~5.1.1",
- "rawSpec": "~5.1.1",
- "scope": null,
- "spec": ">=5.1.1 <5.2.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/concat-stream/node_modules/readable-stream"
- ],
- [
- {
- "name": "safe-buffer",
- "raw": "safe-buffer@~5.1.1",
- "rawSpec": "~5.1.1",
- "scope": null,
- "spec": ">=5.1.1 <5.2.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2/node_modules/readable-stream"
- ]
- ],
- "_from": "safe-buffer@~5.1.1",
- "_hasShrinkwrap": false,
- "_id": "safe-buffer@5.1.2",
- "_inCache": true,
- "_installable": true,
- "_location": "/through2/safe-buffer",
- "_nodeVersion": "8.11.1",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/safe-buffer_5.1.2_1524687024555_0.6520279716197115"
- },
- "_npmUser": {
- "email": "feross@feross.org",
- "name": "feross"
- },
- "_npmVersion": "6.0.0",
- "_phantomChildren": {},
- "_requested": {
- "name": "safe-buffer",
- "raw": "safe-buffer@~5.1.1",
- "rawSpec": "~5.1.1",
- "scope": null,
- "spec": ">=5.1.1 <5.2.0",
- "type": "range"
- },
- "_requiredBy": [
- "/through2/readable-stream",
- "/through2/string_decoder"
- ],
- "_resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "_shasum": "991ec69d296e0313747d59bdfd2b745c35f8828d",
- "_shrinkwrap": null,
- "_spec": "safe-buffer@~5.1.1",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2/node_modules/readable-stream",
- "author": {
- "email": "feross@feross.org",
- "name": "Feross Aboukhadijeh",
- "url": "http://feross.org"
- },
- "bugs": {
- "url": "https://github.com/feross/safe-buffer/issues"
- },
- "dependencies": {},
- "description": "Safer Node.js Buffer API",
- "devDependencies": {
- "standard": "*",
- "tape": "^4.0.0"
- },
- "directories": {},
- "dist": {
- "fileCount": 5,
- "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
- "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJa4OCyCRA9TVsSAnZWagAAwf8QAIlCcf+WlqWgpiGufGgi\n+P81J+YsGVk8haOYIgZX8FTI10RjPKiGN6R11R2B8YMJhxk3kX6O2SO/pl6A\n3tuw9/n+HBQClzobhWovJ6aymO+ozlaxxWADx0DrqKhoDOfpPLoIjaWNS2q2\nh1VxErOXagn/JP11Le89LHqcZs3s1jYyH/cAvA7ygaHvHaDhAiMMimAdz8Ze\n07VKtAM6PSnBYODducjXNTWdqotsRnzmqWEQrV2OQsk8OudJ3+YBZG8szsQ2\nVGrOLeQyj7g+q/WrjFi6I4S+Eg0dYYUg73X3PaAFzGXf7VIjDDNkFjwmtx5u\nUxQ/HczXVzHkcVOfBzbNk4rPTx9o6gr+oDIvWeAgOPYIv1YT5bnMHZ2w7M+8\n4b+SPqVeWHcNfnUhLcHmxim2TWkl0DD+oeYSucsOZ65dJdihMlNiOlVe5XXl\nIPJbfOqNcWCzkibq7pQmTSeM52l8JSekpNZGi3RQQkbrVR+cR3F53VrDzvZe\n3Jqwg7hGlGXyXl5i3TPL0oqD15n/1+wLxZEBVrqM5SeQzvD8l1iDv+3sPViP\ng3msNBtX8NH+Sf4kfCJzHRGestQb0zpVa8wvKeQBBlmpv1kqAgnuPR3k39hI\ncjpqjm2vkmAetW4XnOnCuuMk4SEqejwXh0w1TAHlFLqhywxgSZhH6SJhTbgQ\nKQZ7\r\n=EOr+\r\n-----END PGP SIGNATURE-----\r\n",
- "shasum": "991ec69d296e0313747d59bdfd2b745c35f8828d",
- "tarball": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
- "unpackedSize": 31686
- },
- "gitHead": "649435cc8e2d1f3ecdc7caf323f1cb1187307a16",
- "homepage": "https://github.com/feross/safe-buffer",
- "keywords": [
- "buffer",
- "buffer allocate",
- "node security",
- "safe",
- "safe-buffer",
- "security",
- "uninitialized"
- ],
- "license": "MIT",
- "main": "index.js",
- "maintainers": [
- {
- "email": "feross@feross.org",
- "name": "feross"
- },
- {
- "email": "mathiasbuus@gmail.com",
- "name": "mafintosh"
- }
- ],
- "name": "safe-buffer",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git://github.com/feross/safe-buffer.git"
- },
- "scripts": {
- "test": "standard && tape test/*.js"
- },
- "types": "index.d.ts",
- "version": "5.1.2"
-}
diff --git a/node_modules/through2/node_modules/string_decoder/.travis.yml b/node_modules/through2/node_modules/string_decoder/.travis.yml
deleted file mode 100644
index 3347a7254650582da5339323466f84fe079fc270..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/string_decoder/.travis.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-sudo: false
-language: node_js
-before_install:
- - npm install -g npm@2
- - test $NPM_LEGACY && npm install -g npm@latest-3 || npm install npm -g
-notifications:
- email: false
-matrix:
- fast_finish: true
- include:
- - node_js: '0.8'
- env:
- - TASK=test
- - NPM_LEGACY=true
- - node_js: '0.10'
- env:
- - TASK=test
- - NPM_LEGACY=true
- - node_js: '0.11'
- env:
- - TASK=test
- - NPM_LEGACY=true
- - node_js: '0.12'
- env:
- - TASK=test
- - NPM_LEGACY=true
- - node_js: 1
- env:
- - TASK=test
- - NPM_LEGACY=true
- - node_js: 2
- env:
- - TASK=test
- - NPM_LEGACY=true
- - node_js: 3
- env:
- - TASK=test
- - NPM_LEGACY=true
- - node_js: 4
- env: TASK=test
- - node_js: 5
- env: TASK=test
- - node_js: 6
- env: TASK=test
- - node_js: 7
- env: TASK=test
- - node_js: 8
- env: TASK=test
- - node_js: 9
- env: TASK=test
diff --git a/node_modules/through2/node_modules/string_decoder/LICENSE b/node_modules/through2/node_modules/string_decoder/LICENSE
deleted file mode 100644
index 778edb20730ef48c01002248f4d51e7752c13487..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/string_decoder/LICENSE
+++ /dev/null
@@ -1,48 +0,0 @@
-Node.js is licensed for use as follows:
-
-"""
-Copyright Node.js contributors. All rights reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
-This license applies to parts of Node.js originating from the
-https://github.com/joyent/node repository:
-
-"""
-Copyright Joyent, Inc. and other Node contributors. All rights reserved.
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
-"""
-
diff --git a/node_modules/through2/node_modules/string_decoder/README.md b/node_modules/through2/node_modules/string_decoder/README.md
deleted file mode 100644
index 5fd58315ed588027742dde690a31cd0a2610649d..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/string_decoder/README.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# string_decoder
-
-***Node-core v8.9.4 string_decoder for userland***
-
-
-[![NPM](https://nodei.co/npm/string_decoder.png?downloads=true&downloadRank=true)](https://nodei.co/npm/string_decoder/)
-[![NPM](https://nodei.co/npm-dl/string_decoder.png?&months=6&height=3)](https://nodei.co/npm/string_decoder/)
-
-
-```bash
-npm install --save string_decoder
-```
-
-***Node-core string_decoder for userland***
-
-This package is a mirror of the string_decoder implementation in Node-core.
-
-Full documentation may be found on the [Node.js website](https://nodejs.org/dist/v8.9.4/docs/api/).
-
-As of version 1.0.0 **string_decoder** uses semantic versioning.
-
-## Previous versions
-
-Previous version numbers match the versions found in Node core, e.g. 0.10.24 matches Node 0.10.24, likewise 0.11.10 matches Node 0.11.10.
-
-## Update
-
-The *build/* directory contains a build script that will scrape the source from the [nodejs/node](https://github.com/nodejs/node) repo given a specific Node version.
-
-## Streams Working Group
-
-`string_decoder` is maintained by the Streams Working Group, which
-oversees the development and maintenance of the Streams API within
-Node.js. The responsibilities of the Streams Working Group include:
-
-* Addressing stream issues on the Node.js issue tracker.
-* Authoring and editing stream documentation within the Node.js project.
-* Reviewing changes to stream subclasses within the Node.js project.
-* Redirecting changes to streams from the Node.js project to this
- project.
-* Assisting in the implementation of stream providers within Node.js.
-* Recommending versions of `readable-stream` to be included in Node.js.
-* Messaging about the future of streams to give the community advance
- notice of changes.
-
-See [readable-stream](https://github.com/nodejs/readable-stream) for
-more details.
diff --git a/node_modules/through2/node_modules/string_decoder/lib/string_decoder.js b/node_modules/through2/node_modules/string_decoder/lib/string_decoder.js
deleted file mode 100644
index 2e89e63f7933e42b8ba543ede35d2a8fa3e4f100..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/string_decoder/lib/string_decoder.js
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright Joyent, Inc. and other Node contributors.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to permit
-// persons to whom the Software is furnished to do so, subject to the
-// following conditions:
-//
-// The above copyright notice and this permission notice shall be included
-// in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
-// NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
-// DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
-// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
-// USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-'use strict';
-
-/**/
-
-var Buffer = require('safe-buffer').Buffer;
-/**/
-
-var isEncoding = Buffer.isEncoding || function (encoding) {
- encoding = '' + encoding;
- switch (encoding && encoding.toLowerCase()) {
- case 'hex':case 'utf8':case 'utf-8':case 'ascii':case 'binary':case 'base64':case 'ucs2':case 'ucs-2':case 'utf16le':case 'utf-16le':case 'raw':
- return true;
- default:
- return false;
- }
-};
-
-function _normalizeEncoding(enc) {
- if (!enc) return 'utf8';
- var retried;
- while (true) {
- switch (enc) {
- case 'utf8':
- case 'utf-8':
- return 'utf8';
- case 'ucs2':
- case 'ucs-2':
- case 'utf16le':
- case 'utf-16le':
- return 'utf16le';
- case 'latin1':
- case 'binary':
- return 'latin1';
- case 'base64':
- case 'ascii':
- case 'hex':
- return enc;
- default:
- if (retried) return; // undefined
- enc = ('' + enc).toLowerCase();
- retried = true;
- }
- }
-};
-
-// Do not cache `Buffer.isEncoding` when checking encoding names as some
-// modules monkey-patch it to support additional encodings
-function normalizeEncoding(enc) {
- var nenc = _normalizeEncoding(enc);
- if (typeof nenc !== 'string' && (Buffer.isEncoding === isEncoding || !isEncoding(enc))) throw new Error('Unknown encoding: ' + enc);
- return nenc || enc;
-}
-
-// StringDecoder provides an interface for efficiently splitting a series of
-// buffers into a series of JS strings without breaking apart multi-byte
-// characters.
-exports.StringDecoder = StringDecoder;
-function StringDecoder(encoding) {
- this.encoding = normalizeEncoding(encoding);
- var nb;
- switch (this.encoding) {
- case 'utf16le':
- this.text = utf16Text;
- this.end = utf16End;
- nb = 4;
- break;
- case 'utf8':
- this.fillLast = utf8FillLast;
- nb = 4;
- break;
- case 'base64':
- this.text = base64Text;
- this.end = base64End;
- nb = 3;
- break;
- default:
- this.write = simpleWrite;
- this.end = simpleEnd;
- return;
- }
- this.lastNeed = 0;
- this.lastTotal = 0;
- this.lastChar = Buffer.allocUnsafe(nb);
-}
-
-StringDecoder.prototype.write = function (buf) {
- if (buf.length === 0) return '';
- var r;
- var i;
- if (this.lastNeed) {
- r = this.fillLast(buf);
- if (r === undefined) return '';
- i = this.lastNeed;
- this.lastNeed = 0;
- } else {
- i = 0;
- }
- if (i < buf.length) return r ? r + this.text(buf, i) : this.text(buf, i);
- return r || '';
-};
-
-StringDecoder.prototype.end = utf8End;
-
-// Returns only complete characters in a Buffer
-StringDecoder.prototype.text = utf8Text;
-
-// Attempts to complete a partial non-UTF-8 character using bytes from a Buffer
-StringDecoder.prototype.fillLast = function (buf) {
- if (this.lastNeed <= buf.length) {
- buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, this.lastNeed);
- return this.lastChar.toString(this.encoding, 0, this.lastTotal);
- }
- buf.copy(this.lastChar, this.lastTotal - this.lastNeed, 0, buf.length);
- this.lastNeed -= buf.length;
-};
-
-// Checks the type of a UTF-8 byte, whether it's ASCII, a leading byte, or a
-// continuation byte. If an invalid byte is detected, -2 is returned.
-function utf8CheckByte(byte) {
- if (byte <= 0x7F) return 0;else if (byte >> 5 === 0x06) return 2;else if (byte >> 4 === 0x0E) return 3;else if (byte >> 3 === 0x1E) return 4;
- return byte >> 6 === 0x02 ? -1 : -2;
-}
-
-// Checks at most 3 bytes at the end of a Buffer in order to detect an
-// incomplete multi-byte UTF-8 character. The total number of bytes (2, 3, or 4)
-// needed to complete the UTF-8 character (if applicable) are returned.
-function utf8CheckIncomplete(self, buf, i) {
- var j = buf.length - 1;
- if (j < i) return 0;
- var nb = utf8CheckByte(buf[j]);
- if (nb >= 0) {
- if (nb > 0) self.lastNeed = nb - 1;
- return nb;
- }
- if (--j < i || nb === -2) return 0;
- nb = utf8CheckByte(buf[j]);
- if (nb >= 0) {
- if (nb > 0) self.lastNeed = nb - 2;
- return nb;
- }
- if (--j < i || nb === -2) return 0;
- nb = utf8CheckByte(buf[j]);
- if (nb >= 0) {
- if (nb > 0) {
- if (nb === 2) nb = 0;else self.lastNeed = nb - 3;
- }
- return nb;
- }
- return 0;
-}
-
-// Validates as many continuation bytes for a multi-byte UTF-8 character as
-// needed or are available. If we see a non-continuation byte where we expect
-// one, we "replace" the validated continuation bytes we've seen so far with
-// a single UTF-8 replacement character ('\ufffd'), to match v8's UTF-8 decoding
-// behavior. The continuation byte check is included three times in the case
-// where all of the continuation bytes for a character exist in the same buffer.
-// It is also done this way as a slight performance increase instead of using a
-// loop.
-function utf8CheckExtraBytes(self, buf, p) {
- if ((buf[0] & 0xC0) !== 0x80) {
- self.lastNeed = 0;
- return '\ufffd';
- }
- if (self.lastNeed > 1 && buf.length > 1) {
- if ((buf[1] & 0xC0) !== 0x80) {
- self.lastNeed = 1;
- return '\ufffd';
- }
- if (self.lastNeed > 2 && buf.length > 2) {
- if ((buf[2] & 0xC0) !== 0x80) {
- self.lastNeed = 2;
- return '\ufffd';
- }
- }
- }
-}
-
-// Attempts to complete a multi-byte UTF-8 character using bytes from a Buffer.
-function utf8FillLast(buf) {
- var p = this.lastTotal - this.lastNeed;
- var r = utf8CheckExtraBytes(this, buf, p);
- if (r !== undefined) return r;
- if (this.lastNeed <= buf.length) {
- buf.copy(this.lastChar, p, 0, this.lastNeed);
- return this.lastChar.toString(this.encoding, 0, this.lastTotal);
- }
- buf.copy(this.lastChar, p, 0, buf.length);
- this.lastNeed -= buf.length;
-}
-
-// Returns all complete UTF-8 characters in a Buffer. If the Buffer ended on a
-// partial character, the character's bytes are buffered until the required
-// number of bytes are available.
-function utf8Text(buf, i) {
- var total = utf8CheckIncomplete(this, buf, i);
- if (!this.lastNeed) return buf.toString('utf8', i);
- this.lastTotal = total;
- var end = buf.length - (total - this.lastNeed);
- buf.copy(this.lastChar, 0, end);
- return buf.toString('utf8', i, end);
-}
-
-// For UTF-8, a replacement character is added when ending on a partial
-// character.
-function utf8End(buf) {
- var r = buf && buf.length ? this.write(buf) : '';
- if (this.lastNeed) return r + '\ufffd';
- return r;
-}
-
-// UTF-16LE typically needs two bytes per character, but even if we have an even
-// number of bytes available, we need to check if we end on a leading/high
-// surrogate. In that case, we need to wait for the next two bytes in order to
-// decode the last character properly.
-function utf16Text(buf, i) {
- if ((buf.length - i) % 2 === 0) {
- var r = buf.toString('utf16le', i);
- if (r) {
- var c = r.charCodeAt(r.length - 1);
- if (c >= 0xD800 && c <= 0xDBFF) {
- this.lastNeed = 2;
- this.lastTotal = 4;
- this.lastChar[0] = buf[buf.length - 2];
- this.lastChar[1] = buf[buf.length - 1];
- return r.slice(0, -1);
- }
- }
- return r;
- }
- this.lastNeed = 1;
- this.lastTotal = 2;
- this.lastChar[0] = buf[buf.length - 1];
- return buf.toString('utf16le', i, buf.length - 1);
-}
-
-// For UTF-16LE we do not explicitly append special replacement characters if we
-// end on a partial character; we simply let v8 handle that.
-function utf16End(buf) {
- var r = buf && buf.length ? this.write(buf) : '';
- if (this.lastNeed) {
- var end = this.lastTotal - this.lastNeed;
- return r + this.lastChar.toString('utf16le', 0, end);
- }
- return r;
-}
-
-function base64Text(buf, i) {
- var n = (buf.length - i) % 3;
- if (n === 0) return buf.toString('base64', i);
- this.lastNeed = 3 - n;
- this.lastTotal = 3;
- if (n === 1) {
- this.lastChar[0] = buf[buf.length - 1];
- } else {
- this.lastChar[0] = buf[buf.length - 2];
- this.lastChar[1] = buf[buf.length - 1];
- }
- return buf.toString('base64', i, buf.length - n);
-}
-
-function base64End(buf) {
- var r = buf && buf.length ? this.write(buf) : '';
- if (this.lastNeed) return r + this.lastChar.toString('base64', 0, 3 - this.lastNeed);
- return r;
-}
-
-// Pass bytes on through for single-byte encodings (e.g. ascii, latin1, hex)
-function simpleWrite(buf) {
- return buf.toString(this.encoding);
-}
-
-function simpleEnd(buf) {
- return buf && buf.length ? this.write(buf) : '';
-}
\ No newline at end of file
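The deleted `lib/string_decoder.js` above mirrors the Node-core `string_decoder` API: it buffers the trailing bytes of an incomplete multi-byte character and only emits the character once the remaining bytes arrive. A minimal sketch of that behaviour, using the core `string_decoder` module as a stand-in for the removed copy:

```javascript
// Sketch only: uses Node core's string_decoder, which the deleted file mirrors.
const { StringDecoder } = require('string_decoder');

const decoder = new StringDecoder('utf8');

// U+20AC (€) is the 3-byte UTF-8 sequence E2 82 AC. Split it across two writes:
const part1 = decoder.write(Buffer.from([0xe2, 0x82])); // '' — bytes are buffered
const part2 = decoder.write(Buffer.from([0xac]));       // '€' — character completed

console.log(JSON.stringify(part1)); // ""
console.log(part2);                 // "€"

// end() flushes a dangling partial character as U+FFFD, the replacement character.
console.log(decoder.end(Buffer.from([0xe2]))); // "�"
```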
diff --git a/node_modules/through2/node_modules/string_decoder/package.json b/node_modules/through2/node_modules/string_decoder/package.json
deleted file mode 100644
index a934a2201150ec4214b20771c1aa5e543c555590..0000000000000000000000000000000000000000
--- a/node_modules/through2/node_modules/string_decoder/package.json
+++ /dev/null
@@ -1,124 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "string_decoder",
- "raw": "string_decoder@~1.1.1",
- "rawSpec": "~1.1.1",
- "scope": null,
- "spec": ">=1.1.1 <1.2.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/concat-stream/node_modules/readable-stream"
- ],
- [
- {
- "name": "string_decoder",
- "raw": "string_decoder@~1.1.1",
- "rawSpec": "~1.1.1",
- "scope": null,
- "spec": ">=1.1.1 <1.2.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2/node_modules/readable-stream"
- ]
- ],
- "_from": "string_decoder@~1.1.1",
- "_hasShrinkwrap": false,
- "_id": "string_decoder@1.1.1",
- "_inCache": true,
- "_installable": true,
- "_location": "/through2/string_decoder",
- "_nodeVersion": "8.10.0",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/string_decoder_1.1.1_1522397654739_0.2722524344416213"
- },
- "_npmUser": {
- "email": "hello@matteocollina.com",
- "name": "matteo.collina"
- },
- "_npmVersion": "5.8.0",
- "_phantomChildren": {},
- "_requested": {
- "name": "string_decoder",
- "raw": "string_decoder@~1.1.1",
- "rawSpec": "~1.1.1",
- "scope": null,
- "spec": ">=1.1.1 <1.2.0",
- "type": "range"
- },
- "_requiredBy": [
- "/through2/readable-stream"
- ],
- "_resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
- "_shasum": "9cf1611ba62685d7030ae9e4ba34149c3af03fc8",
- "_shrinkwrap": null,
- "_spec": "string_decoder@~1.1.1",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2/node_modules/readable-stream",
- "bugs": {
- "url": "https://github.com/nodejs/string_decoder/issues"
- },
- "dependencies": {
- "safe-buffer": "~5.1.0"
- },
- "description": "The string_decoder module from Node core",
- "devDependencies": {
- "babel-polyfill": "^6.23.0",
- "core-util-is": "^1.0.2",
- "inherits": "^2.0.3",
- "tap": "~0.4.8"
- },
- "directories": {},
- "dist": {
- "fileCount": 5,
- "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
- "shasum": "9cf1611ba62685d7030ae9e4ba34149c3af03fc8",
- "tarball": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
- "unpackedSize": 15298
- },
- "gitHead": "18c7f89c894ced5f610505bb006dfde9a3d1ac5e",
- "homepage": "https://github.com/nodejs/string_decoder",
- "keywords": [
- "string",
- "decoder",
- "browser",
- "browserify"
- ],
- "license": "MIT",
- "main": "lib/string_decoder.js",
- "maintainers": [
- {
- "email": "calvin.metcalf@gmail.com",
- "name": "cwmma"
- },
- {
- "email": "hello@matteocollina.com",
- "name": "matteo.collina"
- },
- {
- "email": "build@iojs.org",
- "name": "nodejs-foundation"
- },
- {
- "email": "rod@vagg.org",
- "name": "rvagg"
- },
- {
- "email": "substack@gmail.com",
- "name": "substack"
- }
- ],
- "name": "string_decoder",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git://github.com/nodejs/string_decoder.git"
- },
- "scripts": {
- "ci": "tap test/parallel/*.js test/ours/*.js --tap | tee test.tap && node test/verify-dependencies.js",
- "test": "tap test/parallel/*.js && node test/verify-dependencies"
- },
- "version": "1.1.1"
-}
diff --git a/node_modules/through2/package.json b/node_modules/through2/package.json
deleted file mode 100644
index ac6988926352bc1478f89c289b81ba1fc88b6544..0000000000000000000000000000000000000000
--- a/node_modules/through2/package.json
+++ /dev/null
@@ -1,115 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "through2",
- "raw": "through2@^2.0.0",
- "rawSpec": "^2.0.0",
- "scope": null,
- "spec": ">=2.0.0 <3.0.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/gulp-header"
- ]
- ],
- "_from": "through2@>=2.0.0 <3.0.0",
- "_hasShrinkwrap": false,
- "_id": "through2@2.0.5",
- "_inCache": true,
- "_installable": true,
- "_location": "/through2",
- "_nodeVersion": "10.13.0",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/through2_2.0.5_1541541791587_0.6666046444978868"
- },
- "_npmUser": {
- "email": "r@va.gg",
- "name": "rvagg"
- },
- "_npmVersion": "6.4.1",
- "_phantomChildren": {
- "core-util-is": "1.0.2",
- "inherits": "2.0.4",
- "isarray": "1.0.0",
- "process-nextick-args": "2.0.1",
- "util-deprecate": "1.0.2"
- },
- "_requested": {
- "name": "through2",
- "raw": "through2@^2.0.0",
- "rawSpec": "^2.0.0",
- "scope": null,
- "spec": ">=2.0.0 <3.0.0",
- "type": "range"
- },
- "_requiredBy": [
- "/gulp-header"
- ],
- "_resolved": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
- "_shasum": "01c1e39eb31d07cb7d03a96a70823260b23132cd",
- "_shrinkwrap": null,
- "_spec": "through2@^2.0.0",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/gulp-header",
- "author": {
- "email": "r@va.gg",
- "name": "Rod Vagg",
- "url": "https://github.com/rvagg"
- },
- "bugs": {
- "url": "https://github.com/rvagg/through2/issues"
- },
- "dependencies": {
- "readable-stream": "~2.3.6",
- "xtend": "~4.0.1"
- },
- "description": "A tiny wrapper around Node streams2 Transform to avoid explicit subclassing noise",
- "devDependencies": {
- "bl": "~2.0.1",
- "faucet": "0.0.1",
- "nyc": "~13.1.0",
- "safe-buffer": "~5.1.2",
- "stream-spigot": "~3.0.6",
- "tape": "~4.9.1"
- },
- "directories": {},
- "dist": {
- "fileCount": 4,
- "integrity": "sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ==",
- "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJb4g+gCRA9TVsSAnZWagAAozIQAJ9G33BOIVzbaoS4KejZ\nPTu12HkLyxVwCU0iCwR2+BMFw8y0JcNr2eZPNWqSV3beHDPDHY+WRgJesCjP\nzJ5Jv+8hwFLSXG4Kc407ZWV3ten0AV1xlHJjGsY6WjVD6D2KLZiq4mAHQY8m\nWgUSoHSeEvkcgqYLKhFJLfASHlzhwIVbK0P8PfhSulzVBKXKqy229IRTiIY7\nHQXDM0VtBEqnV2rv6+UqvBjY0yTdFWwB4P8x/glNiCBaCMKtzdMMoQvFeIcI\ndbge0qZt2jRZ0ZbRrCF/RBACWomZRpTpGMB7O314GwFPhU3oJwrAyiHcnn4I\n0XXu/56Y8Tw6UqX8LmW2ABr2Ysovxozk9SS0tYeNcPniSWGrTkMlV5UbzSw8\ntPMl1Ph1oQcNYC/jYzetTthA5NNTmuIg0z7QsF9lkGle4Qjg1gWkmVpVEdHH\n0VkDrSM0LZTv+24S1+hwolAtx5NuYT8yImDvOAlaPY1FBTsXEgO6QBjskaOt\neMfvKCB94TLR0IBoJ4Ckr4XDivnNjcSVUwkURj4LTDxY0aKYbnEXmpDqVQZ4\n2iTRR19d33Nkmz6I3VmEGWdYoRpkN7CX3FJ2x1cBgTtNVGftqsJGN92XAWVO\n+/4j+zc2nz6uaWnCWYqiaqXiik1CCITvXMwCi13DWPkVa2azbBrAU3hqshb0\n4rd6\r\n=rL4g\r\n-----END PGP SIGNATURE-----\r\n",
- "shasum": "01c1e39eb31d07cb7d03a96a70823260b23132cd",
- "tarball": "https://registry.npmjs.org/through2/-/through2-2.0.5.tgz",
- "unpackedSize": 9649
- },
- "gitHead": "72a3cefc0ff1ad5cc178be04eb927f40166226f1",
- "homepage": "https://github.com/rvagg/through2#readme",
- "keywords": [
- "stream",
- "streams2",
- "through",
- "transform"
- ],
- "license": "MIT",
- "main": "through2.js",
- "maintainers": [
- {
- "email": "bryce@ravenwall.com",
- "name": "bryce"
- },
- {
- "email": "rod@vagg.org",
- "name": "rvagg"
- }
- ],
- "name": "through2",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git+https://github.com/rvagg/through2.git"
- },
- "scripts": {
- "test": "node test/test.js | faucet"
- },
- "version": "2.0.5"
-}
diff --git a/node_modules/through2/through2.js b/node_modules/through2/through2.js
deleted file mode 100644
index 6baa6a1e8ff831ecc87a32ead711735acb6c4288..0000000000000000000000000000000000000000
--- a/node_modules/through2/through2.js
+++ /dev/null
@@ -1,96 +0,0 @@
-var Transform = require('readable-stream').Transform
- , inherits = require('util').inherits
- , xtend = require('xtend')
-
-function DestroyableTransform(opts) {
- Transform.call(this, opts)
- this._destroyed = false
-}
-
-inherits(DestroyableTransform, Transform)
-
-DestroyableTransform.prototype.destroy = function(err) {
- if (this._destroyed) return
- this._destroyed = true
-
- var self = this
- process.nextTick(function() {
- if (err)
- self.emit('error', err)
- self.emit('close')
- })
-}
-
-// a noop _transform function
-function noop (chunk, enc, callback) {
- callback(null, chunk)
-}
-
-
-// create a new export function, used by both the main export and
-// the .ctor export, contains common logic for dealing with arguments
-function through2 (construct) {
- return function (options, transform, flush) {
- if (typeof options == 'function') {
- flush = transform
- transform = options
- options = {}
- }
-
- if (typeof transform != 'function')
- transform = noop
-
- if (typeof flush != 'function')
- flush = null
-
- return construct(options, transform, flush)
- }
-}
-
-
-// main export, just make me a transform stream!
-module.exports = through2(function (options, transform, flush) {
- var t2 = new DestroyableTransform(options)
-
- t2._transform = transform
-
- if (flush)
- t2._flush = flush
-
- return t2
-})
-
-
-// make me a reusable prototype that I can `new`, or implicitly `new`
-// with a constructor call
-module.exports.ctor = through2(function (options, transform, flush) {
- function Through2 (override) {
- if (!(this instanceof Through2))
- return new Through2(override)
-
- this.options = xtend(options, override)
-
- DestroyableTransform.call(this, this.options)
- }
-
- inherits(Through2, DestroyableTransform)
-
- Through2.prototype._transform = transform
-
- if (flush)
- Through2.prototype._flush = flush
-
- return Through2
-})
-
-
-module.exports.obj = through2(function (options, transform, flush) {
- var t2 = new DestroyableTransform(xtend({ objectMode: true, highWaterMark: 16 }, options))
-
- t2._transform = transform
-
- if (flush)
- t2._flush = flush
-
- return t2
-})
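The deleted `through2.js` above wraps `readable-stream`'s Transform so callers pass a transform function (and an optional flush) instead of subclassing. A minimal sketch of the main export and `.obj`, assuming the `through2` package is installed (it also exposes `.ctor` for building reusable constructors, omitted here):

```javascript
// Sketch only: illustrates the API exported by the deleted through2.js.
const through2 = require('through2');

// Main export: build a Transform stream from a transform callback.
const upperCase = through2(function (chunk, enc, callback) {
  callback(null, chunk.toString().toUpperCase());
});
process.stdin.pipe(upperCase).pipe(process.stdout);

// .obj: same idea with objectMode preset, for streams of JS objects.
const stringify = through2.obj(function (obj, enc, callback) {
  callback(null, JSON.stringify(obj) + '\n');
});
stringify.pipe(process.stdout);
stringify.write({ hello: 'world' });
stringify.end();
```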
diff --git a/node_modules/toml/.jshintrc b/node_modules/toml/.jshintrc
deleted file mode 100644
index 96747b1a673fa022f2142399b64ba9851b2be1f7..0000000000000000000000000000000000000000
--- a/node_modules/toml/.jshintrc
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "node": true,
- "browser": true,
- "browserify": true,
- "curly": true,
- "eqeqeq": true,
- "eqnull": false,
- "latedef": "nofunc",
- "newcap": true,
- "noarg": true,
- "undef": true,
- "strict": true,
- "trailing": true,
- "smarttabs": true,
- "indent": 2,
- "quotmark": true,
- "laxbreak": true
-}
diff --git a/node_modules/toml/.travis.yml b/node_modules/toml/.travis.yml
deleted file mode 100644
index f46aeb8ce098221cd10bd1c415b8a1d8d3d9c744..0000000000000000000000000000000000000000
--- a/node_modules/toml/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: node_js
-sudo: false
-node_js:
- - "4.1"
- - "4.0"
- - "0.12"
- - "0.10"
diff --git a/node_modules/toml/CHANGELOG.md b/node_modules/toml/CHANGELOG.md
deleted file mode 100644
index 65b4db69aa78b84062c2f9cd6e3af22c92282ec2..0000000000000000000000000000000000000000
--- a/node_modules/toml/CHANGELOG.md
+++ /dev/null
@@ -1,116 +0,0 @@
-2.3.0 - July 13 2015
-====================
-
-* Correctly handle quoted keys ([#21](https://github.com/BinaryMuse/toml-node/issues/21))
-
-2.2.3 - June 8 2015
-===================
-
-* Support empty inline tables ([#24](https://github.com/BinaryMuse/toml-node/issues/24))
-* Do not allow implicit table definitions to replace value ([#23](https://github.com/BinaryMuse/toml-node/issues/23))
-* Don't allow tables to replace inline tables ([#25](https://github.com/BinaryMuse/toml-node/issues/25))
-
-2.2.2 - April 3 2015
-====================
-
-* Correctly handle newlines at beginning of string ([#22](https://github.com/BinaryMuse/toml-node/issues/22))
-
-2.2.1 - March 17 2015
-=====================
-
-* Parse dates generated by Date#toISOString() ([#20](https://github.com/BinaryMuse/toml-node/issues/20))
-
-2.2.0 - Feb 26 2015
-===================
-
-* Support TOML spec v0.4.0
-
-2.1.0 - Jan 7 2015
-==================
-
-* Support TOML spec v0.3.1
-
-2.0.6 - May 23 2014
-===================
-
-### Bug Fixes
-
-* Fix support for empty arrays with newlines ([#13](https://github.com/BinaryMuse/toml-node/issues/13))
-
-2.0.5 - May 5 2014
-==================
-
-### Bug Fixes
-
-* Fix loop iteration leak, by [sebmck](https://github.com/sebmck) ([#12](https://github.com/BinaryMuse/toml-node/pull/12))
-
-### Development
-
-* Tests now run JSHint on `lib/compiler.js`
-
-2.0.4 - Mar 9 2014
-==================
-
-### Bug Fixes
-
-* Fix failure on duplicate table name inside table array ([#11](https://github.com/BinaryMuse/toml-node/issues/11))
-
-2.0.2 - Feb 23 2014
-===================
-
-### Bug Fixes
-
-* Fix absence of errors when table path starts or ends with period
-
-2.0.1 - Feb 23 2014
-===================
-
-### Bug Fixes
-
-* Fix incorrect messaging in array type errors
-* Fix missing error when overwriting key with table array
-
-2.0.0 - Feb 23 2014
-===================
-
-### Features
-
-* Add support for [version 0.2 of the TOML spec](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) ([#9](https://github.com/BinaryMuse/toml-node/issues/9))
-
-### Internals
-
-* Upgrade to PEG.js v0.8 and rewrite compiler; parser is now considerably faster (from ~7000ms to ~1000ms to parse `example.toml` 1000 times on Node.js v0.10)
-
-1.0.4 - Aug 17 2013
-===================
-
-### Bug Fixes
-
-* Fix support for empty arrays
-
-1.0.3 - Aug 17 2013
-===================
-
-### Bug Fixes
-
-* Fix typo in array type error message
-* Fix single-element arrays with no trailing commas
-
-1.0.2 - Aug 17 2013
-===================
-
-### Bug Fixes
-
-* Fix errors on lines that contain only whitespace ([#7](https://github.com/BinaryMuse/toml-node/issues/7))
-
-1.0.1 - Aug 17 2013
-===================
-
-### Internals
-
-* Remove old code remaining from the removed streaming API
-
-1.0.0 - Aug 17 2013
-===================
-
-Initial stable release
diff --git a/node_modules/toml/LICENSE b/node_modules/toml/LICENSE
deleted file mode 100644
index 44ae2bfc4048f2e2e4ee6d5253725b0d83587c97..0000000000000000000000000000000000000000
--- a/node_modules/toml/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2012 Michelle Tilley
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
diff --git a/node_modules/toml/README.md b/node_modules/toml/README.md
deleted file mode 100644
index ff4dc5877b035acdea35b7bf39f2fea57c9d8e2c..0000000000000000000000000000000000000000
--- a/node_modules/toml/README.md
+++ /dev/null
@@ -1,93 +0,0 @@
-TOML Parser for Node.js
-=======================
-
-[![Build Status](https://travis-ci.org/BinaryMuse/toml-node.png?branch=master)](https://travis-ci.org/BinaryMuse/toml-node)
-
-[![NPM](https://nodei.co/npm/toml.png?downloads=true)](https://nodei.co/npm/toml/)
-
-If you haven't heard of TOML, well you're just missing out. [Go check it out now.](https://github.com/mojombo/toml) Back? Good.
-
-TOML Spec Support
------------------
-
-toml-node supports version 0.4.0 of the TOML spec as specified by [mojombo/toml@v0.4.0](https://github.com/mojombo/toml/blob/master/versions/en/toml-v0.4.0.md)
-
-Installation
-------------
-
-toml-node is available via npm.
-
- npm install toml
-
-toml-node also works with browser module bundlers like Browserify and webpack.
-
-Usage
------
-
-### Standalone
-
-Say you have some awesome TOML in a variable called `someTomlString`. Maybe it came from the web; maybe it came from a file; wherever it came from, it came asynchronously! Let's turn that sucker into a JavaScript object.
-
-```javascript
-var toml = require('toml');
-var data = toml.parse(someTomlString);
-console.dir(data);
-```
-
-`toml.parse` throws an exception in the case of a parsing error; such exceptions have a `line` and `column` property on them to help identify the offending text.
-
-```javascript
-try {
- toml.parse(someCrazyKnuckleHeadedTrblToml);
-} catch (e) {
- console.error("Parsing error on line " + e.line + ", column " + e.column +
- ": " + e.message);
-}
-```
-
-### Streaming
-
-As of toml-node version 1.0, the streaming interface has been removed. Instead, use a module like [concat-stream](https://npmjs.org/package/concat-stream):
-
-```javascript
-var toml = require('toml');
-var concat = require('concat-stream');
-var fs = require('fs');
-
-fs.createReadStream('tomlFile.toml', 'utf8').pipe(concat(function(data) {
- var parsed = toml.parse(data);
-}));
-```
-
-Thanks [@ForbesLindesay](https://github.com/ForbesLindesay) for the suggestion.
-
-### Requiring with Node.js
-
-You can use the [toml-require package](https://github.com/BinaryMuse/toml-require) to `require()` your `.toml` files with Node.js
-
-Live Demo
----------
-
-You can experiment with TOML online at http://binarymuse.github.io/toml-node/, which uses the latest version of this library.
-
-Building & Testing
-------------------
-
-toml-node uses [the PEG.js parser generator](http://pegjs.majda.cz/).
-
- npm install
- npm run build
- npm test
-
-Any changes to `src/toml.peg` require regenerating the parser with `npm run build`.
-
-toml-node is tested on Travis CI against:
-
- * Node 0.10
- * Node 0.12
- * Latest stable io.js
-
-License
--------
-
-toml-node is licensed under the MIT license agreement. See the LICENSE file for more information.
diff --git a/node_modules/toml/benchmark.js b/node_modules/toml/benchmark.js
deleted file mode 100644
index 99fba1d3dba609e36824c294cba264becb7cff9d..0000000000000000000000000000000000000000
--- a/node_modules/toml/benchmark.js
+++ /dev/null
@@ -1,12 +0,0 @@
-var toml = require('./index');
-var fs = require('fs');
-var data = fs.readFileSync('./test/example.toml', 'utf8');
-
-var iterations = 1000;
-
-var start = new Date();
-for(var i = 0; i < iterations; i++) {
- toml.parse(data);
-}
-var end = new Date();
-console.log("%s iterations in %sms", iterations, end - start);
diff --git a/node_modules/toml/index.d.ts b/node_modules/toml/index.d.ts
deleted file mode 100644
index 7e9052b4ed72978a5907a709ac71567128486b09..0000000000000000000000000000000000000000
--- a/node_modules/toml/index.d.ts
+++ /dev/null
@@ -1,3 +0,0 @@
-declare module 'toml' {
- export function parse(input: string): any;
-}
diff --git a/node_modules/toml/index.js b/node_modules/toml/index.js
deleted file mode 100644
index 6caf44a08fee0539e3f5899c7c625c450d970951..0000000000000000000000000000000000000000
--- a/node_modules/toml/index.js
+++ /dev/null
@@ -1,9 +0,0 @@
-var parser = require('./lib/parser');
-var compiler = require('./lib/compiler');
-
-module.exports = {
- parse: function(input) {
- var nodes = parser.parse(input.toString());
- return compiler.compile(nodes);
- }
-};
diff --git a/node_modules/toml/lib/compiler.js b/node_modules/toml/lib/compiler.js
deleted file mode 100644
index 10dc59a1123c098cabc217e2c22b9662599f9442..0000000000000000000000000000000000000000
--- a/node_modules/toml/lib/compiler.js
+++ /dev/null
@@ -1,195 +0,0 @@
-"use strict";
-function compile(nodes) {
- var assignedPaths = [];
- var valueAssignments = [];
- var currentPath = "";
- var data = {};
- var context = data;
- var arrayMode = false;
-
- return reduce(nodes);
-
- function reduce(nodes) {
- var node;
- for (var i = 0; i < nodes.length; i++) {
- node = nodes[i];
- switch (node.type) {
- case "Assign":
- assign(node);
- break;
- case "ObjectPath":
- setPath(node);
- break;
- case "ArrayPath":
- addTableArray(node);
- break;
- }
- }
-
- return data;
- }
-
- function genError(err, line, col) {
- var ex = new Error(err);
- ex.line = line;
- ex.column = col;
- throw ex;
- }
-
- function assign(node) {
- var key = node.key;
- var value = node.value;
- var line = node.line;
- var column = node.column;
-
- var fullPath;
- if (currentPath) {
- fullPath = currentPath + "." + key;
- } else {
- fullPath = key;
- }
- if (typeof context[key] !== "undefined") {
- genError("Cannot redefine existing key '" + fullPath + "'.", line, column);
- }
-
- context[key] = reduceValueNode(value);
-
- if (!pathAssigned(fullPath)) {
- assignedPaths.push(fullPath);
- valueAssignments.push(fullPath);
- }
- }
-
-
- function pathAssigned(path) {
- return assignedPaths.indexOf(path) !== -1;
- }
-
- function reduceValueNode(node) {
- if (node.type === "Array") {
- return reduceArrayWithTypeChecking(node.value);
- } else if (node.type === "InlineTable") {
- return reduceInlineTableNode(node.value);
- } else {
- return node.value;
- }
- }
-
- function reduceInlineTableNode(values) {
- var obj = {};
- for (var i = 0; i < values.length; i++) {
- var val = values[i];
- if (val.value.type === "InlineTable") {
- obj[val.key] = reduceInlineTableNode(val.value.value);
- } else if (val.type === "InlineTableValue") {
- obj[val.key] = reduceValueNode(val.value);
- }
- }
-
- return obj;
- }
-
- function setPath(node) {
- var path = node.value;
- var quotedPath = path.map(quoteDottedString).join(".");
- var line = node.line;
- var column = node.column;
-
- if (pathAssigned(quotedPath)) {
- genError("Cannot redefine existing key '" + path + "'.", line, column);
- }
- assignedPaths.push(quotedPath);
- context = deepRef(data, path, {}, line, column);
- currentPath = path;
- }
-
- function addTableArray(node) {
- var path = node.value;
- var quotedPath = path.map(quoteDottedString).join(".");
- var line = node.line;
- var column = node.column;
-
- if (!pathAssigned(quotedPath)) {
- assignedPaths.push(quotedPath);
- }
- assignedPaths = assignedPaths.filter(function(p) {
- return p.indexOf(quotedPath) !== 0;
- });
- assignedPaths.push(quotedPath);
- context = deepRef(data, path, [], line, column);
- currentPath = quotedPath;
-
- if (context instanceof Array) {
- var newObj = {};
- context.push(newObj);
- context = newObj;
- } else {
- genError("Cannot redefine existing key '" + path + "'.", line, column);
- }
- }
-
- // Given a path 'a.b.c', create (as necessary) `start.a`,
- // `start.a.b`, and `start.a.b.c`, assigning `value` to `start.a.b.c`.
- // If `a` or `b` are arrays and have items in them, the last item in the
- // array is used as the context for the next sub-path.
- function deepRef(start, keys, value, line, column) {
- var traversed = [];
- var traversedPath = "";
- var path = keys.join(".");
- var ctx = start;
-
- for (var i = 0; i < keys.length; i++) {
- var key = keys[i];
- traversed.push(key);
- traversedPath = traversed.join(".");
- if (typeof ctx[key] === "undefined") {
- if (i === keys.length - 1) {
- ctx[key] = value;
- } else {
- ctx[key] = {};
- }
- } else if (i !== keys.length - 1 && valueAssignments.indexOf(traversedPath) > -1) {
- // already a non-object value at key, can't be used as part of a new path
- genError("Cannot redefine existing key '" + traversedPath + "'.", line, column);
- }
-
- ctx = ctx[key];
- if (ctx instanceof Array && ctx.length && i < keys.length - 1) {
- ctx = ctx[ctx.length - 1];
- }
- }
-
- return ctx;
- }
-
- function reduceArrayWithTypeChecking(array) {
- // Ensure that all items in the array are of the same type
- var firstType = null;
- for (var i = 0; i < array.length; i++) {
- var node = array[i];
- if (firstType === null) {
- firstType = node.type;
- } else {
- if (node.type !== firstType) {
- genError("Cannot add value of type " + node.type + " to array of type " +
- firstType + ".", node.line, node.column);
- }
- }
- }
-
- // Recursively reduce array of nodes into array of the nodes' values
- return array.map(reduceValueNode);
- }
-
- function quoteDottedString(str) {
- if (str.indexOf(".") > -1) {
- return "\"" + str + "\"";
- } else {
- return str;
- }
- }
-}
-
-module.exports = {
- compile: compile
-};
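The deleted `lib/compiler.js` above turns the parser's node list into a plain object; its `deepRef` helper creates intermediate tables on demand, so a header such as `[a.b.c]` materialises `a`, `a.b`, and `a.b.c` in one pass. A minimal illustration through the package's public `toml.parse` entry point, assuming the `toml` package is installed:

```javascript
// Sketch only: shows the nesting behaviour implemented by compile()/deepRef().
var toml = require('toml');

var data = toml.parse([
  '[a.b.c]',
  'answer = 42'
].join('\n'));

console.log(JSON.stringify(data)); // {"a":{"b":{"c":{"answer":42}}}}
```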
diff --git a/node_modules/toml/lib/parser.js b/node_modules/toml/lib/parser.js
deleted file mode 100644
index 69cbd6fd6b52a3af9af838676bd478dc716c17bf..0000000000000000000000000000000000000000
--- a/node_modules/toml/lib/parser.js
+++ /dev/null
@@ -1,3841 +0,0 @@
-module.exports = (function() {
- /*
- * Generated by PEG.js 0.8.0.
- *
- * http://pegjs.majda.cz/
- */
-
- function peg$subclass(child, parent) {
- function ctor() { this.constructor = child; }
- ctor.prototype = parent.prototype;
- child.prototype = new ctor();
- }
-
- function SyntaxError(message, expected, found, offset, line, column) {
- this.message = message;
- this.expected = expected;
- this.found = found;
- this.offset = offset;
- this.line = line;
- this.column = column;
-
- this.name = "SyntaxError";
- }
-
- peg$subclass(SyntaxError, Error);
-
- function parse(input) {
- var options = arguments.length > 1 ? arguments[1] : {},
-
- peg$FAILED = {},
-
- peg$startRuleFunctions = { start: peg$parsestart },
- peg$startRuleFunction = peg$parsestart,
-
- peg$c0 = [],
- peg$c1 = function() { return nodes },
- peg$c2 = peg$FAILED,
- peg$c3 = "#",
- peg$c4 = { type: "literal", value: "#", description: "\"#\"" },
- peg$c5 = void 0,
- peg$c6 = { type: "any", description: "any character" },
- peg$c7 = "[",
- peg$c8 = { type: "literal", value: "[", description: "\"[\"" },
- peg$c9 = "]",
- peg$c10 = { type: "literal", value: "]", description: "\"]\"" },
- peg$c11 = function(name) { addNode(node('ObjectPath', name, line, column)) },
- peg$c12 = function(name) { addNode(node('ArrayPath', name, line, column)) },
- peg$c13 = function(parts, name) { return parts.concat(name) },
- peg$c14 = function(name) { return [name] },
- peg$c15 = function(name) { return name },
- peg$c16 = ".",
- peg$c17 = { type: "literal", value: ".", description: "\".\"" },
- peg$c18 = "=",
- peg$c19 = { type: "literal", value: "=", description: "\"=\"" },
- peg$c20 = function(key, value) { addNode(node('Assign', value, line, column, key)) },
- peg$c21 = function(chars) { return chars.join('') },
- peg$c22 = function(node) { return node.value },
- peg$c23 = "\"\"\"",
- peg$c24 = { type: "literal", value: "\"\"\"", description: "\"\\\"\\\"\\\"\"" },
- peg$c25 = null,
- peg$c26 = function(chars) { return node('String', chars.join(''), line, column) },
- peg$c27 = "\"",
- peg$c28 = { type: "literal", value: "\"", description: "\"\\\"\"" },
- peg$c29 = "'''",
- peg$c30 = { type: "literal", value: "'''", description: "\"'''\"" },
- peg$c31 = "'",
- peg$c32 = { type: "literal", value: "'", description: "\"'\"" },
- peg$c33 = function(char) { return char },
- peg$c34 = function(char) { return char},
- peg$c35 = "\\",
- peg$c36 = { type: "literal", value: "\\", description: "\"\\\\\"" },
- peg$c37 = function() { return '' },
- peg$c38 = "e",
- peg$c39 = { type: "literal", value: "e", description: "\"e\"" },
- peg$c40 = "E",
- peg$c41 = { type: "literal", value: "E", description: "\"E\"" },
- peg$c42 = function(left, right) { return node('Float', parseFloat(left + 'e' + right), line, column) },
- peg$c43 = function(text) { return node('Float', parseFloat(text), line, column) },
- peg$c44 = "+",
- peg$c45 = { type: "literal", value: "+", description: "\"+\"" },
- peg$c46 = function(digits) { return digits.join('') },
- peg$c47 = "-",
- peg$c48 = { type: "literal", value: "-", description: "\"-\"" },
- peg$c49 = function(digits) { return '-' + digits.join('') },
- peg$c50 = function(text) { return node('Integer', parseInt(text, 10), line, column) },
- peg$c51 = "true",
- peg$c52 = { type: "literal", value: "true", description: "\"true\"" },
- peg$c53 = function() { return node('Boolean', true, line, column) },
- peg$c54 = "false",
- peg$c55 = { type: "literal", value: "false", description: "\"false\"" },
- peg$c56 = function() { return node('Boolean', false, line, column) },
- peg$c57 = function() { return node('Array', [], line, column) },
- peg$c58 = function(value) { return node('Array', value ? [value] : [], line, column) },
- peg$c59 = function(values) { return node('Array', values, line, column) },
- peg$c60 = function(values, value) { return node('Array', values.concat(value), line, column) },
- peg$c61 = function(value) { return value },
- peg$c62 = ",",
- peg$c63 = { type: "literal", value: ",", description: "\",\"" },
- peg$c64 = "{",
- peg$c65 = { type: "literal", value: "{", description: "\"{\"" },
- peg$c66 = "}",
- peg$c67 = { type: "literal", value: "}", description: "\"}\"" },
- peg$c68 = function(values) { return node('InlineTable', values, line, column) },
- peg$c69 = function(key, value) { return node('InlineTableValue', value, line, column, key) },
- peg$c70 = function(digits) { return "." + digits },
- peg$c71 = function(date) { return date.join('') },
- peg$c72 = ":",
- peg$c73 = { type: "literal", value: ":", description: "\":\"" },
- peg$c74 = function(time) { return time.join('') },
- peg$c75 = "T",
- peg$c76 = { type: "literal", value: "T", description: "\"T\"" },
- peg$c77 = "Z",
- peg$c78 = { type: "literal", value: "Z", description: "\"Z\"" },
- peg$c79 = function(date, time) { return node('Date', new Date(date + "T" + time + "Z"), line, column) },
- peg$c80 = function(date, time) { return node('Date', new Date(date + "T" + time), line, column) },
- peg$c81 = /^[ \t]/,
- peg$c82 = { type: "class", value: "[ \\t]", description: "[ \\t]" },
- peg$c83 = "\n",
- peg$c84 = { type: "literal", value: "\n", description: "\"\\n\"" },
- peg$c85 = "\r",
- peg$c86 = { type: "literal", value: "\r", description: "\"\\r\"" },
- peg$c87 = /^[0-9a-f]/i,
- peg$c88 = { type: "class", value: "[0-9a-f]i", description: "[0-9a-f]i" },
- peg$c89 = /^[0-9]/,
- peg$c90 = { type: "class", value: "[0-9]", description: "[0-9]" },
- peg$c91 = "_",
- peg$c92 = { type: "literal", value: "_", description: "\"_\"" },
- peg$c93 = function() { return "" },
- peg$c94 = /^[A-Za-z0-9_\-]/,
- peg$c95 = { type: "class", value: "[A-Za-z0-9_\\-]", description: "[A-Za-z0-9_\\-]" },
- peg$c96 = function(d) { return d.join('') },
- peg$c97 = "\\\"",
- peg$c98 = { type: "literal", value: "\\\"", description: "\"\\\\\\\"\"" },
- peg$c99 = function() { return '"' },
- peg$c100 = "\\\\",
- peg$c101 = { type: "literal", value: "\\\\", description: "\"\\\\\\\\\"" },
- peg$c102 = function() { return '\\' },
- peg$c103 = "\\b",
- peg$c104 = { type: "literal", value: "\\b", description: "\"\\\\b\"" },
- peg$c105 = function() { return '\b' },
- peg$c106 = "\\t",
- peg$c107 = { type: "literal", value: "\\t", description: "\"\\\\t\"" },
- peg$c108 = function() { return '\t' },
- peg$c109 = "\\n",
- peg$c110 = { type: "literal", value: "\\n", description: "\"\\\\n\"" },
- peg$c111 = function() { return '\n' },
- peg$c112 = "\\f",
- peg$c113 = { type: "literal", value: "\\f", description: "\"\\\\f\"" },
- peg$c114 = function() { return '\f' },
- peg$c115 = "\\r",
- peg$c116 = { type: "literal", value: "\\r", description: "\"\\\\r\"" },
- peg$c117 = function() { return '\r' },
- peg$c118 = "\\U",
- peg$c119 = { type: "literal", value: "\\U", description: "\"\\\\U\"" },
- peg$c120 = function(digits) { return convertCodePoint(digits.join('')) },
- peg$c121 = "\\u",
- peg$c122 = { type: "literal", value: "\\u", description: "\"\\\\u\"" },
-
- peg$currPos = 0,
- peg$reportedPos = 0,
- peg$cachedPos = 0,
- peg$cachedPosDetails = { line: 1, column: 1, seenCR: false },
- peg$maxFailPos = 0,
- peg$maxFailExpected = [],
- peg$silentFails = 0,
-
- peg$cache = {},
- peg$result;
-
- if ("startRule" in options) {
- if (!(options.startRule in peg$startRuleFunctions)) {
- throw new Error("Can't start parsing from rule \"" + options.startRule + "\".");
- }
-
- peg$startRuleFunction = peg$startRuleFunctions[options.startRule];
- }
-
- function text() {
- return input.substring(peg$reportedPos, peg$currPos);
- }
-
- function offset() {
- return peg$reportedPos;
- }
-
- function line() {
- return peg$computePosDetails(peg$reportedPos).line;
- }
-
- function column() {
- return peg$computePosDetails(peg$reportedPos).column;
- }
-
- function expected(description) {
- throw peg$buildException(
- null,
- [{ type: "other", description: description }],
- peg$reportedPos
- );
- }
-
- function error(message) {
- throw peg$buildException(message, null, peg$reportedPos);
- }
-
- function peg$computePosDetails(pos) {
- function advance(details, startPos, endPos) {
- var p, ch;
-
- for (p = startPos; p < endPos; p++) {
- ch = input.charAt(p);
- if (ch === "\n") {
- if (!details.seenCR) { details.line++; }
- details.column = 1;
- details.seenCR = false;
- } else if (ch === "\r" || ch === "\u2028" || ch === "\u2029") {
- details.line++;
- details.column = 1;
- details.seenCR = true;
- } else {
- details.column++;
- details.seenCR = false;
- }
- }
- }
-
- if (peg$cachedPos !== pos) {
- if (peg$cachedPos > pos) {
- peg$cachedPos = 0;
- peg$cachedPosDetails = { line: 1, column: 1, seenCR: false };
- }
- advance(peg$cachedPosDetails, peg$cachedPos, pos);
- peg$cachedPos = pos;
- }
-
- return peg$cachedPosDetails;
- }
-
- function peg$fail(expected) {
- if (peg$currPos < peg$maxFailPos) { return; }
-
- if (peg$currPos > peg$maxFailPos) {
- peg$maxFailPos = peg$currPos;
- peg$maxFailExpected = [];
- }
-
- peg$maxFailExpected.push(expected);
- }
-
- function peg$buildException(message, expected, pos) {
- function cleanupExpected(expected) {
- var i = 1;
-
- expected.sort(function(a, b) {
- if (a.description < b.description) {
- return -1;
- } else if (a.description > b.description) {
- return 1;
- } else {
- return 0;
- }
- });
-
- while (i < expected.length) {
- if (expected[i - 1] === expected[i]) {
- expected.splice(i, 1);
- } else {
- i++;
- }
- }
- }
-
- function buildMessage(expected, found) {
- function stringEscape(s) {
- function hex(ch) { return ch.charCodeAt(0).toString(16).toUpperCase(); }
-
- return s
- .replace(/\\/g, '\\\\')
- .replace(/"/g, '\\"')
- .replace(/\x08/g, '\\b')
- .replace(/\t/g, '\\t')
- .replace(/\n/g, '\\n')
- .replace(/\f/g, '\\f')
- .replace(/\r/g, '\\r')
- .replace(/[\x00-\x07\x0B\x0E\x0F]/g, function(ch) { return '\\x0' + hex(ch); })
- .replace(/[\x10-\x1F\x80-\xFF]/g, function(ch) { return '\\x' + hex(ch); })
- .replace(/[\u0180-\u0FFF]/g, function(ch) { return '\\u0' + hex(ch); })
- .replace(/[\u1080-\uFFFF]/g, function(ch) { return '\\u' + hex(ch); });
- }
-
- var expectedDescs = new Array(expected.length),
- expectedDesc, foundDesc, i;
-
- for (i = 0; i < expected.length; i++) {
- expectedDescs[i] = expected[i].description;
- }
-
- expectedDesc = expected.length > 1
- ? expectedDescs.slice(0, -1).join(", ")
- + " or "
- + expectedDescs[expected.length - 1]
- : expectedDescs[0];
-
- foundDesc = found ? "\"" + stringEscape(found) + "\"" : "end of input";
-
- return "Expected " + expectedDesc + " but " + foundDesc + " found.";
- }
-
- var posDetails = peg$computePosDetails(pos),
- found = pos < input.length ? input.charAt(pos) : null;
-
- if (expected !== null) {
- cleanupExpected(expected);
- }
-
- return new SyntaxError(
- message !== null ? message : buildMessage(expected, found),
- expected,
- found,
- pos,
- posDetails.line,
- posDetails.column
- );
- }
-
- function peg$parsestart() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 0,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseline();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseline();
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c1();
- }
- s0 = s1;
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseline() {
- var s0, s1, s2, s3, s4, s5, s6;
-
- var key = peg$currPos * 49 + 1,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parseexpression();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- s4 = [];
- s5 = peg$parsecomment();
- while (s5 !== peg$FAILED) {
- s4.push(s5);
- s5 = peg$parsecomment();
- }
- if (s4 !== peg$FAILED) {
- s5 = [];
- s6 = peg$parseNL();
- if (s6 !== peg$FAILED) {
- while (s6 !== peg$FAILED) {
- s5.push(s6);
- s6 = peg$parseNL();
- }
- } else {
- s5 = peg$c2;
- }
- if (s5 === peg$FAILED) {
- s5 = peg$parseEOF();
- }
- if (s5 !== peg$FAILED) {
- s1 = [s1, s2, s3, s4, s5];
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- if (s2 !== peg$FAILED) {
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- } else {
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseNL();
- if (s3 !== peg$FAILED) {
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseNL();
- }
- } else {
- s2 = peg$c2;
- }
- if (s2 === peg$FAILED) {
- s2 = peg$parseEOF();
- }
- if (s2 !== peg$FAILED) {
- s1 = [s1, s2];
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$parseNL();
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseexpression() {
- var s0;
-
- var key = peg$currPos * 49 + 2,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$parsecomment();
- if (s0 === peg$FAILED) {
- s0 = peg$parsepath();
- if (s0 === peg$FAILED) {
- s0 = peg$parsetablearray();
- if (s0 === peg$FAILED) {
- s0 = peg$parseassignment();
- }
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsecomment() {
- var s0, s1, s2, s3, s4, s5;
-
- var key = peg$currPos * 49 + 3,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 35) {
- s1 = peg$c3;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c4); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$currPos;
- s4 = peg$currPos;
- peg$silentFails++;
- s5 = peg$parseNL();
- if (s5 === peg$FAILED) {
- s5 = peg$parseEOF();
- }
- peg$silentFails--;
- if (s5 === peg$FAILED) {
- s4 = peg$c5;
- } else {
- peg$currPos = s4;
- s4 = peg$c2;
- }
- if (s4 !== peg$FAILED) {
- if (input.length > peg$currPos) {
- s5 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s5 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c6); }
- }
- if (s5 !== peg$FAILED) {
- s4 = [s4, s5];
- s3 = s4;
- } else {
- peg$currPos = s3;
- s3 = peg$c2;
- }
- } else {
- peg$currPos = s3;
- s3 = peg$c2;
- }
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$currPos;
- s4 = peg$currPos;
- peg$silentFails++;
- s5 = peg$parseNL();
- if (s5 === peg$FAILED) {
- s5 = peg$parseEOF();
- }
- peg$silentFails--;
- if (s5 === peg$FAILED) {
- s4 = peg$c5;
- } else {
- peg$currPos = s4;
- s4 = peg$c2;
- }
- if (s4 !== peg$FAILED) {
- if (input.length > peg$currPos) {
- s5 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s5 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c6); }
- }
- if (s5 !== peg$FAILED) {
- s4 = [s4, s5];
- s3 = s4;
- } else {
- peg$currPos = s3;
- s3 = peg$c2;
- }
- } else {
- peg$currPos = s3;
- s3 = peg$c2;
- }
- }
- if (s2 !== peg$FAILED) {
- s1 = [s1, s2];
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsepath() {
- var s0, s1, s2, s3, s4, s5;
-
- var key = peg$currPos * 49 + 4,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 91) {
- s1 = peg$c7;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c8); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseS();
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseS();
- }
- if (s2 !== peg$FAILED) {
- s3 = peg$parsetable_key();
- if (s3 !== peg$FAILED) {
- s4 = [];
- s5 = peg$parseS();
- while (s5 !== peg$FAILED) {
- s4.push(s5);
- s5 = peg$parseS();
- }
- if (s4 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 93) {
- s5 = peg$c9;
- peg$currPos++;
- } else {
- s5 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c10); }
- }
- if (s5 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c11(s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsetablearray() {
- var s0, s1, s2, s3, s4, s5, s6, s7;
-
- var key = peg$currPos * 49 + 5,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 91) {
- s1 = peg$c7;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c8); }
- }
- if (s1 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 91) {
- s2 = peg$c7;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c8); }
- }
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- s4 = peg$parsetable_key();
- if (s4 !== peg$FAILED) {
- s5 = [];
- s6 = peg$parseS();
- while (s6 !== peg$FAILED) {
- s5.push(s6);
- s6 = peg$parseS();
- }
- if (s5 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 93) {
- s6 = peg$c9;
- peg$currPos++;
- } else {
- s6 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c10); }
- }
- if (s6 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 93) {
- s7 = peg$c9;
- peg$currPos++;
- } else {
- s7 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c10); }
- }
- if (s7 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c12(s4);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsetable_key() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 6,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parsedot_ended_table_key_part();
- if (s2 !== peg$FAILED) {
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parsedot_ended_table_key_part();
- }
- } else {
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsetable_key_part();
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c13(s1, s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = peg$parsetable_key_part();
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c14(s1);
- }
- s0 = s1;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsetable_key_part() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 7,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsekey();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c15(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsequoted_key();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c15(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsedot_ended_table_key_part() {
- var s0, s1, s2, s3, s4, s5, s6;
-
- var key = peg$currPos * 49 + 8,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsekey();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 46) {
- s4 = peg$c16;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c17); }
- }
- if (s4 !== peg$FAILED) {
- s5 = [];
- s6 = peg$parseS();
- while (s6 !== peg$FAILED) {
- s5.push(s6);
- s6 = peg$parseS();
- }
- if (s5 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c15(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsequoted_key();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 46) {
- s4 = peg$c16;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c17); }
- }
- if (s4 !== peg$FAILED) {
- s5 = [];
- s6 = peg$parseS();
- while (s6 !== peg$FAILED) {
- s5.push(s6);
- s6 = peg$parseS();
- }
- if (s5 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c15(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseassignment() {
- var s0, s1, s2, s3, s4, s5;
-
- var key = peg$currPos * 49 + 9,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$parsekey();
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseS();
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseS();
- }
- if (s2 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 61) {
- s3 = peg$c18;
- peg$currPos++;
- } else {
- s3 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c19); }
- }
- if (s3 !== peg$FAILED) {
- s4 = [];
- s5 = peg$parseS();
- while (s5 !== peg$FAILED) {
- s4.push(s5);
- s5 = peg$parseS();
- }
- if (s4 !== peg$FAILED) {
- s5 = peg$parsevalue();
- if (s5 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c20(s1, s5);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = peg$parsequoted_key();
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseS();
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseS();
- }
- if (s2 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 61) {
- s3 = peg$c18;
- peg$currPos++;
- } else {
- s3 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c19); }
- }
- if (s3 !== peg$FAILED) {
- s4 = [];
- s5 = peg$parseS();
- while (s5 !== peg$FAILED) {
- s4.push(s5);
- s5 = peg$parseS();
- }
- if (s4 !== peg$FAILED) {
- s5 = peg$parsevalue();
- if (s5 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c20(s1, s5);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsekey() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 10,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseASCII_BASIC();
- if (s2 !== peg$FAILED) {
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseASCII_BASIC();
- }
- } else {
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c21(s1);
- }
- s0 = s1;
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsequoted_key() {
- var s0, s1;
-
- var key = peg$currPos * 49 + 11,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$parsedouble_quoted_single_line_string();
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c22(s1);
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = peg$parsesingle_quoted_single_line_string();
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c22(s1);
- }
- s0 = s1;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsevalue() {
- var s0;
-
- var key = peg$currPos * 49 + 12,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$parsestring();
- if (s0 === peg$FAILED) {
- s0 = peg$parsedatetime();
- if (s0 === peg$FAILED) {
- s0 = peg$parsefloat();
- if (s0 === peg$FAILED) {
- s0 = peg$parseinteger();
- if (s0 === peg$FAILED) {
- s0 = peg$parseboolean();
- if (s0 === peg$FAILED) {
- s0 = peg$parsearray();
- if (s0 === peg$FAILED) {
- s0 = peg$parseinline_table();
- }
- }
- }
- }
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsestring() {
- var s0;
-
- var key = peg$currPos * 49 + 13,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$parsedouble_quoted_multiline_string();
- if (s0 === peg$FAILED) {
- s0 = peg$parsedouble_quoted_single_line_string();
- if (s0 === peg$FAILED) {
- s0 = peg$parsesingle_quoted_multiline_string();
- if (s0 === peg$FAILED) {
- s0 = peg$parsesingle_quoted_single_line_string();
- }
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsedouble_quoted_multiline_string() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 14,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 3) === peg$c23) {
- s1 = peg$c23;
- peg$currPos += 3;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c24); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parseNL();
- if (s2 === peg$FAILED) {
- s2 = peg$c25;
- }
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parsemultiline_string_char();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parsemultiline_string_char();
- }
- if (s3 !== peg$FAILED) {
- if (input.substr(peg$currPos, 3) === peg$c23) {
- s4 = peg$c23;
- peg$currPos += 3;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c24); }
- }
- if (s4 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c26(s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsedouble_quoted_single_line_string() {
- var s0, s1, s2, s3;
-
- var key = peg$currPos * 49 + 15,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 34) {
- s1 = peg$c27;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c28); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parsestring_char();
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parsestring_char();
- }
- if (s2 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 34) {
- s3 = peg$c27;
- peg$currPos++;
- } else {
- s3 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c28); }
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c26(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsesingle_quoted_multiline_string() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 16,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 3) === peg$c29) {
- s1 = peg$c29;
- peg$currPos += 3;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c30); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parseNL();
- if (s2 === peg$FAILED) {
- s2 = peg$c25;
- }
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parsemultiline_literal_char();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parsemultiline_literal_char();
- }
- if (s3 !== peg$FAILED) {
- if (input.substr(peg$currPos, 3) === peg$c29) {
- s4 = peg$c29;
- peg$currPos += 3;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c30); }
- }
- if (s4 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c26(s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsesingle_quoted_single_line_string() {
- var s0, s1, s2, s3;
-
- var key = peg$currPos * 49 + 17,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 39) {
- s1 = peg$c31;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c32); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseliteral_char();
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseliteral_char();
- }
- if (s2 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 39) {
- s3 = peg$c31;
- peg$currPos++;
- } else {
- s3 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c32); }
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c26(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsestring_char() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 18,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$parseESCAPED();
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = peg$currPos;
- peg$silentFails++;
- if (input.charCodeAt(peg$currPos) === 34) {
- s2 = peg$c27;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c28); }
- }
- peg$silentFails--;
- if (s2 === peg$FAILED) {
- s1 = peg$c5;
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- if (input.length > peg$currPos) {
- s2 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c6); }
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c33(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseliteral_char() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 19,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$currPos;
- peg$silentFails++;
- if (input.charCodeAt(peg$currPos) === 39) {
- s2 = peg$c31;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c32); }
- }
- peg$silentFails--;
- if (s2 === peg$FAILED) {
- s1 = peg$c5;
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- if (input.length > peg$currPos) {
- s2 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c6); }
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c33(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsemultiline_string_char() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 20,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$parseESCAPED();
- if (s0 === peg$FAILED) {
- s0 = peg$parsemultiline_string_delim();
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = peg$currPos;
- peg$silentFails++;
- if (input.substr(peg$currPos, 3) === peg$c23) {
- s2 = peg$c23;
- peg$currPos += 3;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c24); }
- }
- peg$silentFails--;
- if (s2 === peg$FAILED) {
- s1 = peg$c5;
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- if (input.length > peg$currPos) {
- s2 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c6); }
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c34(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsemultiline_string_delim() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 21,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 92) {
- s1 = peg$c35;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c36); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parseNL();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseNLS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseNLS();
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c37();
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsemultiline_literal_char() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 22,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$currPos;
- peg$silentFails++;
- if (input.substr(peg$currPos, 3) === peg$c29) {
- s2 = peg$c29;
- peg$currPos += 3;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c30); }
- }
- peg$silentFails--;
- if (s2 === peg$FAILED) {
- s1 = peg$c5;
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- if (input.length > peg$currPos) {
- s2 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c6); }
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c33(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsefloat() {
- var s0, s1, s2, s3;
-
- var key = peg$currPos * 49 + 23,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$parsefloat_text();
- if (s1 === peg$FAILED) {
- s1 = peg$parseinteger_text();
- }
- if (s1 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 101) {
- s2 = peg$c38;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c39); }
- }
- if (s2 === peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 69) {
- s2 = peg$c40;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c41); }
- }
- }
- if (s2 !== peg$FAILED) {
- s3 = peg$parseinteger_text();
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c42(s1, s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = peg$parsefloat_text();
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c43(s1);
- }
- s0 = s1;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsefloat_text() {
- var s0, s1, s2, s3, s4, s5;
-
- var key = peg$currPos * 49 + 24,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 43) {
- s1 = peg$c44;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c45); }
- }
- if (s1 === peg$FAILED) {
- s1 = peg$c25;
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$currPos;
- s3 = peg$parseDIGITS();
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 46) {
- s4 = peg$c16;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c17); }
- }
- if (s4 !== peg$FAILED) {
- s5 = peg$parseDIGITS();
- if (s5 !== peg$FAILED) {
- s3 = [s3, s4, s5];
- s2 = s3;
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c46(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 45) {
- s1 = peg$c47;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c48); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$currPos;
- s3 = peg$parseDIGITS();
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 46) {
- s4 = peg$c16;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c17); }
- }
- if (s4 !== peg$FAILED) {
- s5 = peg$parseDIGITS();
- if (s5 !== peg$FAILED) {
- s3 = [s3, s4, s5];
- s2 = s3;
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c49(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseinteger() {
- var s0, s1;
-
- var key = peg$currPos * 49 + 25,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$parseinteger_text();
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c50(s1);
- }
- s0 = s1;
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseinteger_text() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 26,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 43) {
- s1 = peg$c44;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c45); }
- }
- if (s1 === peg$FAILED) {
- s1 = peg$c25;
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseDIGIT_OR_UNDER();
- if (s3 !== peg$FAILED) {
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseDIGIT_OR_UNDER();
- }
- } else {
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- s3 = peg$currPos;
- peg$silentFails++;
- if (input.charCodeAt(peg$currPos) === 46) {
- s4 = peg$c16;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c17); }
- }
- peg$silentFails--;
- if (s4 === peg$FAILED) {
- s3 = peg$c5;
- } else {
- peg$currPos = s3;
- s3 = peg$c2;
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c46(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 45) {
- s1 = peg$c47;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c48); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseDIGIT_OR_UNDER();
- if (s3 !== peg$FAILED) {
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseDIGIT_OR_UNDER();
- }
- } else {
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- s3 = peg$currPos;
- peg$silentFails++;
- if (input.charCodeAt(peg$currPos) === 46) {
- s4 = peg$c16;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c17); }
- }
- peg$silentFails--;
- if (s4 === peg$FAILED) {
- s3 = peg$c5;
- } else {
- peg$currPos = s3;
- s3 = peg$c2;
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c49(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseboolean() {
- var s0, s1;
-
- var key = peg$currPos * 49 + 27,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 4) === peg$c51) {
- s1 = peg$c51;
- peg$currPos += 4;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c52); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c53();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 5) === peg$c54) {
- s1 = peg$c54;
- peg$currPos += 5;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c55); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c56();
- }
- s0 = s1;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsearray() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 28,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 91) {
- s1 = peg$c7;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c8); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parsearray_sep();
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parsearray_sep();
- }
- if (s2 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 93) {
- s3 = peg$c9;
- peg$currPos++;
- } else {
- s3 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c10); }
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c57();
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 91) {
- s1 = peg$c7;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c8); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsearray_value();
- if (s2 === peg$FAILED) {
- s2 = peg$c25;
- }
- if (s2 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 93) {
- s3 = peg$c9;
- peg$currPos++;
- } else {
- s3 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c10); }
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c58(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 91) {
- s1 = peg$c7;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c8); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parsearray_value_list();
- if (s3 !== peg$FAILED) {
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parsearray_value_list();
- }
- } else {
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 93) {
- s3 = peg$c9;
- peg$currPos++;
- } else {
- s3 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c10); }
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c59(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 91) {
- s1 = peg$c7;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c8); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parsearray_value_list();
- if (s3 !== peg$FAILED) {
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parsearray_value_list();
- }
- } else {
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- s3 = peg$parsearray_value();
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 93) {
- s4 = peg$c9;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c10); }
- }
- if (s4 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c60(s2, s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsearray_value() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 29,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parsearray_sep();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parsearray_sep();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsevalue();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parsearray_sep();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parsearray_sep();
- }
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c61(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsearray_value_list() {
- var s0, s1, s2, s3, s4, s5, s6;
-
- var key = peg$currPos * 49 + 30,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parsearray_sep();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parsearray_sep();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsevalue();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parsearray_sep();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parsearray_sep();
- }
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 44) {
- s4 = peg$c62;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c63); }
- }
- if (s4 !== peg$FAILED) {
- s5 = [];
- s6 = peg$parsearray_sep();
- while (s6 !== peg$FAILED) {
- s5.push(s6);
- s6 = peg$parsearray_sep();
- }
- if (s5 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c61(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsearray_sep() {
- var s0;
-
- var key = peg$currPos * 49 + 31,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$parseS();
- if (s0 === peg$FAILED) {
- s0 = peg$parseNL();
- if (s0 === peg$FAILED) {
- s0 = peg$parsecomment();
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseinline_table() {
- var s0, s1, s2, s3, s4, s5;
-
- var key = peg$currPos * 49 + 32,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 123) {
- s1 = peg$c64;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c65); }
- }
- if (s1 !== peg$FAILED) {
- s2 = [];
- s3 = peg$parseS();
- while (s3 !== peg$FAILED) {
- s2.push(s3);
- s3 = peg$parseS();
- }
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseinline_table_assignment();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseinline_table_assignment();
- }
- if (s3 !== peg$FAILED) {
- s4 = [];
- s5 = peg$parseS();
- while (s5 !== peg$FAILED) {
- s4.push(s5);
- s5 = peg$parseS();
- }
- if (s4 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 125) {
- s5 = peg$c66;
- peg$currPos++;
- } else {
- s5 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c67); }
- }
- if (s5 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c68(s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseinline_table_assignment() {
- var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
-
- var key = peg$currPos * 49 + 33,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsekey();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 61) {
- s4 = peg$c18;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c19); }
- }
- if (s4 !== peg$FAILED) {
- s5 = [];
- s6 = peg$parseS();
- while (s6 !== peg$FAILED) {
- s5.push(s6);
- s6 = peg$parseS();
- }
- if (s5 !== peg$FAILED) {
- s6 = peg$parsevalue();
- if (s6 !== peg$FAILED) {
- s7 = [];
- s8 = peg$parseS();
- while (s8 !== peg$FAILED) {
- s7.push(s8);
- s8 = peg$parseS();
- }
- if (s7 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 44) {
- s8 = peg$c62;
- peg$currPos++;
- } else {
- s8 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c63); }
- }
- if (s8 !== peg$FAILED) {
- s9 = [];
- s10 = peg$parseS();
- while (s10 !== peg$FAILED) {
- s9.push(s10);
- s10 = peg$parseS();
- }
- if (s9 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c69(s2, s6);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseS();
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseS();
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parsekey();
- if (s2 !== peg$FAILED) {
- s3 = [];
- s4 = peg$parseS();
- while (s4 !== peg$FAILED) {
- s3.push(s4);
- s4 = peg$parseS();
- }
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 61) {
- s4 = peg$c18;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c19); }
- }
- if (s4 !== peg$FAILED) {
- s5 = [];
- s6 = peg$parseS();
- while (s6 !== peg$FAILED) {
- s5.push(s6);
- s6 = peg$parseS();
- }
- if (s5 !== peg$FAILED) {
- s6 = peg$parsevalue();
- if (s6 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c69(s2, s6);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsesecfragment() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 34,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 46) {
- s1 = peg$c16;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c17); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$parseDIGITS();
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c70(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsedate() {
- var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
-
- var key = peg$currPos * 49 + 35,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$currPos;
- s2 = peg$parseDIGIT_OR_UNDER();
- if (s2 !== peg$FAILED) {
- s3 = peg$parseDIGIT_OR_UNDER();
- if (s3 !== peg$FAILED) {
- s4 = peg$parseDIGIT_OR_UNDER();
- if (s4 !== peg$FAILED) {
- s5 = peg$parseDIGIT_OR_UNDER();
- if (s5 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 45) {
- s6 = peg$c47;
- peg$currPos++;
- } else {
- s6 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c48); }
- }
- if (s6 !== peg$FAILED) {
- s7 = peg$parseDIGIT_OR_UNDER();
- if (s7 !== peg$FAILED) {
- s8 = peg$parseDIGIT_OR_UNDER();
- if (s8 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 45) {
- s9 = peg$c47;
- peg$currPos++;
- } else {
- s9 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c48); }
- }
- if (s9 !== peg$FAILED) {
- s10 = peg$parseDIGIT_OR_UNDER();
- if (s10 !== peg$FAILED) {
- s11 = peg$parseDIGIT_OR_UNDER();
- if (s11 !== peg$FAILED) {
- s2 = [s2, s3, s4, s5, s6, s7, s8, s9, s10, s11];
- s1 = s2;
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c71(s1);
- }
- s0 = s1;
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsetime() {
- var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
-
- var key = peg$currPos * 49 + 36,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$currPos;
- s2 = peg$parseDIGIT_OR_UNDER();
- if (s2 !== peg$FAILED) {
- s3 = peg$parseDIGIT_OR_UNDER();
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 58) {
- s4 = peg$c72;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c73); }
- }
- if (s4 !== peg$FAILED) {
- s5 = peg$parseDIGIT_OR_UNDER();
- if (s5 !== peg$FAILED) {
- s6 = peg$parseDIGIT_OR_UNDER();
- if (s6 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 58) {
- s7 = peg$c72;
- peg$currPos++;
- } else {
- s7 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c73); }
- }
- if (s7 !== peg$FAILED) {
- s8 = peg$parseDIGIT_OR_UNDER();
- if (s8 !== peg$FAILED) {
- s9 = peg$parseDIGIT_OR_UNDER();
- if (s9 !== peg$FAILED) {
- s10 = peg$parsesecfragment();
- if (s10 === peg$FAILED) {
- s10 = peg$c25;
- }
- if (s10 !== peg$FAILED) {
- s2 = [s2, s3, s4, s5, s6, s7, s8, s9, s10];
- s1 = s2;
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c74(s1);
- }
- s0 = s1;
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsetime_with_offset() {
- var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16;
-
- var key = peg$currPos * 49 + 37,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$currPos;
- s2 = peg$parseDIGIT_OR_UNDER();
- if (s2 !== peg$FAILED) {
- s3 = peg$parseDIGIT_OR_UNDER();
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 58) {
- s4 = peg$c72;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c73); }
- }
- if (s4 !== peg$FAILED) {
- s5 = peg$parseDIGIT_OR_UNDER();
- if (s5 !== peg$FAILED) {
- s6 = peg$parseDIGIT_OR_UNDER();
- if (s6 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 58) {
- s7 = peg$c72;
- peg$currPos++;
- } else {
- s7 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c73); }
- }
- if (s7 !== peg$FAILED) {
- s8 = peg$parseDIGIT_OR_UNDER();
- if (s8 !== peg$FAILED) {
- s9 = peg$parseDIGIT_OR_UNDER();
- if (s9 !== peg$FAILED) {
- s10 = peg$parsesecfragment();
- if (s10 === peg$FAILED) {
- s10 = peg$c25;
- }
- if (s10 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 45) {
- s11 = peg$c47;
- peg$currPos++;
- } else {
- s11 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c48); }
- }
- if (s11 === peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 43) {
- s11 = peg$c44;
- peg$currPos++;
- } else {
- s11 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c45); }
- }
- }
- if (s11 !== peg$FAILED) {
- s12 = peg$parseDIGIT_OR_UNDER();
- if (s12 !== peg$FAILED) {
- s13 = peg$parseDIGIT_OR_UNDER();
- if (s13 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 58) {
- s14 = peg$c72;
- peg$currPos++;
- } else {
- s14 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c73); }
- }
- if (s14 !== peg$FAILED) {
- s15 = peg$parseDIGIT_OR_UNDER();
- if (s15 !== peg$FAILED) {
- s16 = peg$parseDIGIT_OR_UNDER();
- if (s16 !== peg$FAILED) {
- s2 = [s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16];
- s1 = s2;
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- } else {
- peg$currPos = s1;
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c74(s1);
- }
- s0 = s1;
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parsedatetime() {
- var s0, s1, s2, s3, s4;
-
- var key = peg$currPos * 49 + 38,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = peg$parsedate();
- if (s1 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 84) {
- s2 = peg$c75;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c76); }
- }
- if (s2 !== peg$FAILED) {
- s3 = peg$parsetime();
- if (s3 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 90) {
- s4 = peg$c77;
- peg$currPos++;
- } else {
- s4 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c78); }
- }
- if (s4 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c79(s1, s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- s1 = peg$parsedate();
- if (s1 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 84) {
- s2 = peg$c75;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c76); }
- }
- if (s2 !== peg$FAILED) {
- s3 = peg$parsetime_with_offset();
- if (s3 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c80(s1, s3);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseS() {
- var s0;
-
- var key = peg$currPos * 49 + 39,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- if (peg$c81.test(input.charAt(peg$currPos))) {
- s0 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s0 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c82); }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseNL() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 40,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- if (input.charCodeAt(peg$currPos) === 10) {
- s0 = peg$c83;
- peg$currPos++;
- } else {
- s0 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c84); }
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 13) {
- s1 = peg$c85;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c86); }
- }
- if (s1 !== peg$FAILED) {
- if (input.charCodeAt(peg$currPos) === 10) {
- s2 = peg$c83;
- peg$currPos++;
- } else {
- s2 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c84); }
- }
- if (s2 !== peg$FAILED) {
- s1 = [s1, s2];
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseNLS() {
- var s0;
-
- var key = peg$currPos * 49 + 41,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$parseNL();
- if (s0 === peg$FAILED) {
- s0 = peg$parseS();
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseEOF() {
- var s0, s1;
-
- var key = peg$currPos * 49 + 42,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- peg$silentFails++;
- if (input.length > peg$currPos) {
- s1 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c6); }
- }
- peg$silentFails--;
- if (s1 === peg$FAILED) {
- s0 = peg$c5;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseHEX() {
- var s0;
-
- var key = peg$currPos * 49 + 43,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- if (peg$c87.test(input.charAt(peg$currPos))) {
- s0 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s0 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c88); }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseDIGIT_OR_UNDER() {
- var s0, s1;
-
- var key = peg$currPos * 49 + 44,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- if (peg$c89.test(input.charAt(peg$currPos))) {
- s0 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s0 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c90); }
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.charCodeAt(peg$currPos) === 95) {
- s1 = peg$c91;
- peg$currPos++;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c92); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c93();
- }
- s0 = s1;
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseASCII_BASIC() {
- var s0;
-
- var key = peg$currPos * 49 + 45,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- if (peg$c94.test(input.charAt(peg$currPos))) {
- s0 = input.charAt(peg$currPos);
- peg$currPos++;
- } else {
- s0 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c95); }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseDIGITS() {
- var s0, s1, s2;
-
- var key = peg$currPos * 49 + 46,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- s1 = [];
- s2 = peg$parseDIGIT_OR_UNDER();
- if (s2 !== peg$FAILED) {
- while (s2 !== peg$FAILED) {
- s1.push(s2);
- s2 = peg$parseDIGIT_OR_UNDER();
- }
- } else {
- s1 = peg$c2;
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c96(s1);
- }
- s0 = s1;
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseESCAPED() {
- var s0, s1;
-
- var key = peg$currPos * 49 + 47,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c97) {
- s1 = peg$c97;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c98); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c99();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c100) {
- s1 = peg$c100;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c101); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c102();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c103) {
- s1 = peg$c103;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c104); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c105();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c106) {
- s1 = peg$c106;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c107); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c108();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c109) {
- s1 = peg$c109;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c110); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c111();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c112) {
- s1 = peg$c112;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c113); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c114();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c115) {
- s1 = peg$c115;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c116); }
- }
- if (s1 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c117();
- }
- s0 = s1;
- if (s0 === peg$FAILED) {
- s0 = peg$parseESCAPED_UNICODE();
- }
- }
- }
- }
- }
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
- function peg$parseESCAPED_UNICODE() {
- var s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
-
- var key = peg$currPos * 49 + 48,
- cached = peg$cache[key];
-
- if (cached) {
- peg$currPos = cached.nextPos;
- return cached.result;
- }
-
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c118) {
- s1 = peg$c118;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c119); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$currPos;
- s3 = peg$parseHEX();
- if (s3 !== peg$FAILED) {
- s4 = peg$parseHEX();
- if (s4 !== peg$FAILED) {
- s5 = peg$parseHEX();
- if (s5 !== peg$FAILED) {
- s6 = peg$parseHEX();
- if (s6 !== peg$FAILED) {
- s7 = peg$parseHEX();
- if (s7 !== peg$FAILED) {
- s8 = peg$parseHEX();
- if (s8 !== peg$FAILED) {
- s9 = peg$parseHEX();
- if (s9 !== peg$FAILED) {
- s10 = peg$parseHEX();
- if (s10 !== peg$FAILED) {
- s3 = [s3, s4, s5, s6, s7, s8, s9, s10];
- s2 = s3;
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c120(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- if (s0 === peg$FAILED) {
- s0 = peg$currPos;
- if (input.substr(peg$currPos, 2) === peg$c121) {
- s1 = peg$c121;
- peg$currPos += 2;
- } else {
- s1 = peg$FAILED;
- if (peg$silentFails === 0) { peg$fail(peg$c122); }
- }
- if (s1 !== peg$FAILED) {
- s2 = peg$currPos;
- s3 = peg$parseHEX();
- if (s3 !== peg$FAILED) {
- s4 = peg$parseHEX();
- if (s4 !== peg$FAILED) {
- s5 = peg$parseHEX();
- if (s5 !== peg$FAILED) {
- s6 = peg$parseHEX();
- if (s6 !== peg$FAILED) {
- s3 = [s3, s4, s5, s6];
- s2 = s3;
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- } else {
- peg$currPos = s2;
- s2 = peg$c2;
- }
- if (s2 !== peg$FAILED) {
- peg$reportedPos = s0;
- s1 = peg$c120(s2);
- s0 = s1;
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- } else {
- peg$currPos = s0;
- s0 = peg$c2;
- }
- }
-
- peg$cache[key] = { nextPos: peg$currPos, result: s0 };
-
- return s0;
- }
-
-
- var nodes = [];
-
- function genError(err, line, col) {
- var ex = new Error(err);
- ex.line = line;
- ex.column = col;
- throw ex;
- }
-
- function addNode(node) {
- nodes.push(node);
- }
-
- function node(type, value, line, column, key) {
- var obj = { type: type, value: value, line: line(), column: column() };
- if (key) obj.key = key;
- return obj;
- }
-
- function convertCodePoint(str, line, col) {
- var num = parseInt("0x" + str);
-
- if (
- !isFinite(num) ||
- Math.floor(num) != num ||
- num < 0 ||
- num > 0x10FFFF ||
- (num > 0xD7FF && num < 0xE000)
- ) {
- genError("Invalid Unicode escape code: " + str, line, col);
- } else {
- return fromCodePoint(num);
- }
- }
-
- function fromCodePoint() {
- var MAX_SIZE = 0x4000;
- var codeUnits = [];
- var highSurrogate;
- var lowSurrogate;
- var index = -1;
- var length = arguments.length;
- if (!length) {
- return '';
- }
- var result = '';
- while (++index < length) {
- var codePoint = Number(arguments[index]);
- if (codePoint <= 0xFFFF) { // BMP code point
- codeUnits.push(codePoint);
- } else { // Astral code point; split in surrogate halves
- // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
- codePoint -= 0x10000;
- highSurrogate = (codePoint >> 10) + 0xD800;
- lowSurrogate = (codePoint % 0x400) + 0xDC00;
- codeUnits.push(highSurrogate, lowSurrogate);
- }
- if (index + 1 == length || codeUnits.length > MAX_SIZE) {
- result += String.fromCharCode.apply(null, codeUnits);
- codeUnits.length = 0;
- }
- }
- return result;
- }
-
-
- peg$result = peg$startRuleFunction();
-
- if (peg$result !== peg$FAILED && peg$currPos === input.length) {
- return peg$result;
- } else {
- if (peg$result !== peg$FAILED && peg$currPos < input.length) {
- peg$fail({ type: "end", description: "end of input" });
- }
-
- throw peg$buildException(null, peg$maxFailExpected, peg$maxFailPos);
- }
- }
-
- return {
- SyntaxError: SyntaxError,
- parse: parse
- };
-})();
diff --git a/node_modules/toml/package.json b/node_modules/toml/package.json
deleted file mode 100644
index 865551c2772f117aa06462fc6a0a0bbbacae126c..0000000000000000000000000000000000000000
--- a/node_modules/toml/package.json
+++ /dev/null
@@ -1,99 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "toml",
- "raw": "toml@^2.3.2",
- "rawSpec": "^2.3.2",
- "scope": null,
- "spec": ">=2.3.2 <3.0.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/gray-matter"
- ]
- ],
- "_from": "toml@>=2.3.2 <3.0.0",
- "_hasShrinkwrap": false,
- "_id": "toml@2.3.6",
- "_inCache": true,
- "_installable": true,
- "_location": "/toml",
- "_nodeVersion": "10.12.0",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/toml_2.3.6_1548879743559_0.6752333339904679"
- },
- "_npmUser": {
- "email": "michelle@michelletilley.net",
- "name": "binarymuse"
- },
- "_npmVersion": "6.7.0",
- "_phantomChildren": {},
- "_requested": {
- "name": "toml",
- "raw": "toml@^2.3.2",
- "rawSpec": "^2.3.2",
- "scope": null,
- "spec": ">=2.3.2 <3.0.0",
- "type": "range"
- },
- "_requiredBy": [
- "/gray-matter"
- ],
- "_resolved": "https://registry.npmjs.org/toml/-/toml-2.3.6.tgz",
- "_shasum": "25b0866483a9722474895559088b436fd11f861b",
- "_shrinkwrap": null,
- "_spec": "toml@^2.3.2",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/gray-matter",
- "author": {
- "email": "michelle@michelletilley.net",
- "name": "Michelle Tilley"
- },
- "bugs": {
- "url": "https://github.com/BinaryMuse/toml-node/issues"
- },
- "dependencies": {},
- "description": "TOML parser for Node.js (parses TOML spec v0.4.0)",
- "devDependencies": {
- "jshint": "*",
- "nodeunit": "~0.9.0",
- "pegjs": "~0.8.0"
- },
- "directories": {},
- "dist": {
- "fileCount": 24,
- "integrity": "sha512-gVweAectJU3ebq//Ferr2JUY4WKSDe5N+z0FvjDncLGyHmIDoxgY/2Ie4qfEIDm4IS7OA6Rmdm7pdEEdMcV/xQ==",
- "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJcUgeACRA9TVsSAnZWagAAmmsP/RcMyQZbzZfrl9NiHVEY\nDKAt5UGWq1k+QqeYjZeRlnyx5pFoEbZkErnXFPCwrVVC4V0g2dIXBcOXFGoj\nUSAYnJ08iUdMMyTsHhdfjto6oKvAXy9EuZ03g7UXHqpMgi1fw3TemW2e6Ixb\nSBBxukr7FCPdk98eEvWXqL3+AvEh/DCKGopSeQ5KPsJj7GiayomB8kUnPDEe\nJQY6uj29ub8mLRO1893RVXNKcuoF0O7DMUmtmtAk2TAQtMhLlCYTzkUiIMiv\nH29PeekJGkmqSRg8P+78MwoH6zAJCfaFjPCcynLdr9HZAfUcNR024tucbWtm\naj07N2FOGIsgejsmca2n75SbRQFl6IPm0m3m/Sk9yMXv+C8mLxaJOfrAbSGR\nl5VdfzF+BvlhJNbuA40b88l83OsikZN2V33IQ6YoC6kBn5urMH6naedOXw6b\nHN26eNPpNeLzi7XJeJmn/BtuqgqZTqOV2xI7B9DpBwc/3dnlkvkQ/hbgDoXz\nD7FEWYMqDr/EFnvHc8WxOsUHH7tIfU+IE8BAdcmw3J5NS32OGad9GV9o8M4A\n497J71qBlR4/px5Uvok/NyhB0e+V+SaJRscPMqfVoZgNC2hJrWtLUzEUadMm\nF59QfNEX/9Q1ZQnkiTUps2r/rHlYyyd2bJ98MryzK5W9LyT8z1vxWUpw8niR\n0DKz\r\n=l9TM\r\n-----END PGP SIGNATURE-----\r\n",
- "shasum": "25b0866483a9722474895559088b436fd11f861b",
- "tarball": "https://registry.npmjs.org/toml/-/toml-2.3.6.tgz",
- "unpackedSize": 143652
- },
- "gitHead": "0527511448c461e1ddbbb6b8579227783f905f05",
- "homepage": "https://github.com/BinaryMuse/toml-node#readme",
- "keywords": [
- "toml",
- "parser"
- ],
- "license": "MIT",
- "main": "index.js",
- "maintainers": [
- {
- "email": "brandon@brandontilley.com",
- "name": "binarymuse"
- }
- ],
- "name": "toml",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git://github.com/BinaryMuse/toml-node.git"
- },
- "scripts": {
- "build": "pegjs --cache src/toml.pegjs lib/parser.js",
- "prepublish": "npm run build",
- "test": "jshint lib/compiler.js && nodeunit test/test_*.js"
- },
- "types": "index.d.ts",
- "version": "2.3.6"
-}
diff --git a/node_modules/toml/src/toml.pegjs b/node_modules/toml/src/toml.pegjs
deleted file mode 100644
index 70517078247683de55d2ee1e6e1bb5a3e3bb6090..0000000000000000000000000000000000000000
--- a/node_modules/toml/src/toml.pegjs
+++ /dev/null
@@ -1,231 +0,0 @@
-{
- var nodes = [];
-
- function genError(err, line, col) {
- var ex = new Error(err);
- ex.line = line;
- ex.column = col;
- throw ex;
- }
-
- function addNode(node) {
- nodes.push(node);
- }
-
- function node(type, value, line, column, key) {
- var obj = { type: type, value: value, line: line(), column: column() };
- if (key) obj.key = key;
- return obj;
- }
-
- function convertCodePoint(str, line, col) {
- var num = parseInt("0x" + str);
-
- if (
- !isFinite(num) ||
- Math.floor(num) != num ||
- num < 0 ||
- num > 0x10FFFF ||
- (num > 0xD7FF && num < 0xE000)
- ) {
- genError("Invalid Unicode escape code: " + str, line, col);
- } else {
- return fromCodePoint(num);
- }
- }
-
- function fromCodePoint() {
- var MAX_SIZE = 0x4000;
- var codeUnits = [];
- var highSurrogate;
- var lowSurrogate;
- var index = -1;
- var length = arguments.length;
- if (!length) {
- return '';
- }
- var result = '';
- while (++index < length) {
- var codePoint = Number(arguments[index]);
- if (codePoint <= 0xFFFF) { // BMP code point
- codeUnits.push(codePoint);
- } else { // Astral code point; split in surrogate halves
- // http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
- codePoint -= 0x10000;
- highSurrogate = (codePoint >> 10) + 0xD800;
- lowSurrogate = (codePoint % 0x400) + 0xDC00;
- codeUnits.push(highSurrogate, lowSurrogate);
- }
- if (index + 1 == length || codeUnits.length > MAX_SIZE) {
- result += String.fromCharCode.apply(null, codeUnits);
- codeUnits.length = 0;
- }
- }
- return result;
- }
-}
-
-start
- = line* { return nodes }
-
-line
- = S* expr:expression S* comment* (NL+ / EOF)
- / S+ (NL+ / EOF)
- / NL
-
-expression
- = comment / path / tablearray / assignment
-
-comment
- = '#' (!(NL / EOF) .)*
-
-path
- = '[' S* name:table_key S* ']' { addNode(node('ObjectPath', name, line, column)) }
-
-tablearray
- = '[' '[' S* name:table_key S* ']' ']' { addNode(node('ArrayPath', name, line, column)) }
-
-table_key
- = parts:dot_ended_table_key_part+ name:table_key_part { return parts.concat(name) }
- / name:table_key_part { return [name] }
-
-table_key_part
- = S* name:key S* { return name }
- / S* name:quoted_key S* { return name }
-
-dot_ended_table_key_part
- = S* name:key S* '.' S* { return name }
- / S* name:quoted_key S* '.' S* { return name }
-
-assignment
- = key:key S* '=' S* value:value { addNode(node('Assign', value, line, column, key)) }
- / key:quoted_key S* '=' S* value:value { addNode(node('Assign', value, line, column, key)) }
-
-key
- = chars:ASCII_BASIC+ { return chars.join('') }
-
-quoted_key
- = node:double_quoted_single_line_string { return node.value }
- / node:single_quoted_single_line_string { return node.value }
-
-value
- = string / datetime / float / integer / boolean / array / inline_table
-
-string
- = double_quoted_multiline_string
- / double_quoted_single_line_string
- / single_quoted_multiline_string
- / single_quoted_single_line_string
-
-double_quoted_multiline_string
- = '"""' NL? chars:multiline_string_char* '"""' { return node('String', chars.join(''), line, column) }
-double_quoted_single_line_string
- = '"' chars:string_char* '"' { return node('String', chars.join(''), line, column) }
-single_quoted_multiline_string
- = "'''" NL? chars:multiline_literal_char* "'''" { return node('String', chars.join(''), line, column) }
-single_quoted_single_line_string
- = "'" chars:literal_char* "'" { return node('String', chars.join(''), line, column) }
-
-string_char
- = ESCAPED / (!'"' char:. { return char })
-
-literal_char
- = (!"'" char:. { return char })
-
-multiline_string_char
- = ESCAPED / multiline_string_delim / (!'"""' char:. { return char})
-
-multiline_string_delim
- = '\\' NL NLS* { return '' }
-
-multiline_literal_char
- = (!"'''" char:. { return char })
-
-float
- = left:(float_text / integer_text) ('e' / 'E') right:integer_text { return node('Float', parseFloat(left + 'e' + right), line, column) }
- / text:float_text { return node('Float', parseFloat(text), line, column) }
-
-float_text
- = '+'? digits:(DIGITS '.' DIGITS) { return digits.join('') }
- / '-' digits:(DIGITS '.' DIGITS) { return '-' + digits.join('') }
-
-integer
- = text:integer_text { return node('Integer', parseInt(text, 10), line, column) }
-
-integer_text
- = '+'? digits:DIGIT+ !'.' { return digits.join('') }
- / '-' digits:DIGIT+ !'.' { return '-' + digits.join('') }
-
-boolean
- = 'true' { return node('Boolean', true, line, column) }
- / 'false' { return node('Boolean', false, line, column) }
-
-array
- = '[' array_sep* ']' { return node('Array', [], line, column) }
- / '[' value:array_value? ']' { return node('Array', value ? [value] : [], line, column) }
- / '[' values:array_value_list+ ']' { return node('Array', values, line, column) }
- / '[' values:array_value_list+ value:array_value ']' { return node('Array', values.concat(value), line, column) }
-
-array_value
- = array_sep* value:value array_sep* { return value }
-
-array_value_list
- = array_sep* value:value array_sep* ',' array_sep* { return value }
-
-array_sep
- = S / NL / comment
-
-inline_table
- = '{' S* values:inline_table_assignment* S* '}' { return node('InlineTable', values, line, column) }
-
-inline_table_assignment
- = S* key:key S* '=' S* value:value S* ',' S* { return node('InlineTableValue', value, line, column, key) }
- / S* key:key S* '=' S* value:value { return node('InlineTableValue', value, line, column, key) }
-
-secfragment
- = '.' digits:DIGITS { return "." + digits }
-
-date
- = date:(
- DIGIT DIGIT DIGIT DIGIT
- '-'
- DIGIT DIGIT
- '-'
- DIGIT DIGIT
- ) { return date.join('') }
-
-time
- = time:(DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT secfragment?) { return time.join('') }
-
-time_with_offset
- = time:(
- DIGIT DIGIT ':' DIGIT DIGIT ':' DIGIT DIGIT secfragment?
- ('-' / '+')
- DIGIT DIGIT ':' DIGIT DIGIT
- ) { return time.join('') }
-
-datetime
- = date:date 'T' time:time 'Z' { return node('Date', new Date(date + "T" + time + "Z"), line, column) }
- / date:date 'T' time:time_with_offset { return node('Date', new Date(date + "T" + time), line, column) }
-
-
-S = [ \t]
-NL = "\n" / "\r" "\n"
-NLS = NL / S
-EOF = !.
-HEX = [0-9a-f]i
-DIGIT = DIGIT_OR_UNDER
-DIGIT_OR_UNDER = [0-9]
- / '_' { return "" }
-ASCII_BASIC = [A-Za-z0-9_\-]
-DIGITS = d:DIGIT_OR_UNDER+ { return d.join('') }
-ESCAPED = '\\"' { return '"' }
- / '\\\\' { return '\\' }
- / '\\b' { return '\b' }
- / '\\t' { return '\t' }
- / '\\n' { return '\n' }
- / '\\f' { return '\f' }
- / '\\r' { return '\r' }
- / ESCAPED_UNICODE
-ESCAPED_UNICODE = "\\U" digits:(HEX HEX HEX HEX HEX HEX HEX HEX) { return convertCodePoint(digits.join('')) }
- / "\\u" digits:(HEX HEX HEX HEX) { return convertCodePoint(digits.join('')) }
diff --git a/node_modules/toml/test/bad.toml b/node_modules/toml/test/bad.toml
deleted file mode 100644
index d51c3f310deda5be034f53984aaf319091e20f57..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/bad.toml
+++ /dev/null
@@ -1,5 +0,0 @@
-[something]
-awesome = "this is"
-
-[something.awesome]
-this = "isn't"
diff --git a/node_modules/toml/test/example.toml b/node_modules/toml/test/example.toml
deleted file mode 100644
index ea9dc35d3bf6453127bb1cb40068d3a312d6d6f9..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/example.toml
+++ /dev/null
@@ -1,32 +0,0 @@
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\n\tLikes \"tater tots\" and beer and backslashes: \\"
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8003 ]
-connection_max = 5000
-connection_min = -2 # Don't ask me how
-max_temp = 87.1 # It's a float
-min_temp = -17.76
-enabled = true
-
-[servers]
-
- # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha]
- ip = "10.0.0.1"
- dc = "eqdc10"
-
- [servers.beta]
- ip = "10.0.0.2"
- dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
diff --git a/node_modules/toml/test/hard_example.toml b/node_modules/toml/test/hard_example.toml
deleted file mode 100644
index 38856c8737a8e8066b1204d85b676883e2a292c2..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/hard_example.toml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Test file for TOML
-# Only this one tries to emulate a TOML file written by a user of the kind of parser writers probably hate
-# This part you'll really hate
-
-[the]
-test_string = "You'll hate me after this - #" # " Annoying, isn't it?
-
- [the.hard]
- test_array = [ "] ", " # "] # ] There you go, parse this!
- test_array2 = [ "Test #11 ]proved that", "Experiment #9 was a success" ]
- # You didn't think it'd as easy as chucking out the last #, did you?
- another_test_string = " Same thing, but with a string #"
- harder_test_string = " And when \"'s are in the string, along with # \"" # "and comments are there too"
- # Things will get harder
-
- [the.hard."bit#"]
- "what?" = "You don't think some user won't do that?"
- multi_line_array = [
- "]",
- # ] Oh yes I did
- ]
-
-# Each of the following keygroups/key value pairs should produce an error. Uncomment to them to test
-
-#[error] if you didn't catch this, your parser is broken
-#string = "Anything other than tabs, spaces and newline after a keygroup or key value pair has ended should produce an error unless it is a comment" like this
-#array = [
-# "This might most likely happen in multiline arrays",
-# Like here,
-# "or here,
-# and here"
-# ] End of array comment, forgot the #
-#number = 3.14 pi <--again forgot the #
diff --git a/node_modules/toml/test/inline_tables.toml b/node_modules/toml/test/inline_tables.toml
deleted file mode 100644
index c91088eecff5e601319f6e9548fed9911d12611c..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/inline_tables.toml
+++ /dev/null
@@ -1,10 +0,0 @@
-name = { first = "Tom", last = "Preston-Werner" }
-point = { x = 1, y = 2 }
-nested = { x = { a = { b = 3 } } }
-
-points = [ { x = 1, y = 2, z = 3 },
- { x = 7, y = 8, z = 9 },
- { x = 2, y = 4, z = 8 } ]
-
-arrays = [ { x = [1, 2, 3], y = [4, 5, 6] },
- { x = [7, 8, 9], y = [0, 1, 2] } ]
diff --git a/node_modules/toml/test/literal_strings.toml b/node_modules/toml/test/literal_strings.toml
deleted file mode 100644
index 36772bb6e796c880c0e561005bcfee7bdfaaea1d..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/literal_strings.toml
+++ /dev/null
@@ -1,5 +0,0 @@
-# What you see is what you get.
-winpath = 'C:\Users\nodejs\templates'
-winpath2 = '\\ServerX\admin$\system32\'
-quoted = 'Tom "Dubs" Preston-Werner'
-regex = '<\i\c*\s*>'
diff --git a/node_modules/toml/test/multiline_eat_whitespace.toml b/node_modules/toml/test/multiline_eat_whitespace.toml
deleted file mode 100644
index 904c1707614cff08f5cab7d6021998d3f521f248..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/multiline_eat_whitespace.toml
+++ /dev/null
@@ -1,15 +0,0 @@
-# The following strings are byte-for-byte equivalent:
-key1 = "The quick brown fox jumps over the lazy dog."
-
-key2 = """
-The quick brown \
-
-
- fox jumps over \
- the lazy dog."""
-
-key3 = """\
- The quick brown \
- fox jumps over \
- the lazy dog.\
- """
diff --git a/node_modules/toml/test/multiline_literal_strings.toml b/node_modules/toml/test/multiline_literal_strings.toml
deleted file mode 100644
index bc88494c4dd4540ad0acd3e785454155300d1eba..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/multiline_literal_strings.toml
+++ /dev/null
@@ -1,7 +0,0 @@
-regex2 = '''I [dw]on't need \d{2} apples'''
-lines = '''
-The first newline is
-trimmed in raw strings.
- All other whitespace
- is preserved.
-'''
diff --git a/node_modules/toml/test/multiline_strings.toml b/node_modules/toml/test/multiline_strings.toml
deleted file mode 100644
index 6eb8c45af45b6d14c5e81cab9a198d393549e5c9..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/multiline_strings.toml
+++ /dev/null
@@ -1,6 +0,0 @@
-# The following strings are byte-for-byte equivalent:
-key1 = "One\nTwo"
-key2 = """One\nTwo"""
-key3 = """
-One
-Two"""
diff --git a/node_modules/toml/test/smoke.js b/node_modules/toml/test/smoke.js
deleted file mode 100644
index 7769f9c4f6802e5ce8f0692a8249040a7233d651..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/smoke.js
+++ /dev/null
@@ -1,22 +0,0 @@
-var fs = require('fs');
-var parser = require('../index');
-
-var codes = [
- "# test\n my.key=\"value\"\nother = 101\nthird = -37",
- "first = 1.2\nsecond = -56.02\nth = true\nfth = false",
- "time = 1979-05-27T07:32:00Z",
- "test = [\"one\", ]",
- "test = [[1, 2,], [true, false,],]",
- "[my.sub.path]\nkey = true\nother = -15.3\n[my.sub]\nkey=false",
- "arry = [\"one\", \"two\",\"thr\nee\", \"\\u03EA\"]",
- fs.readFileSync(__dirname + '/example.toml', 'utf8'),
- fs.readFileSync(__dirname + '/hard_example.toml', 'utf8')
-]
-
-console.log("=============================================");
-for(i in codes) {
- var code = codes[i];
- console.log(code + "\n");
- console.log(JSON.stringify(parser.parse(code)));
- console.log("=============================================");
-}
diff --git a/node_modules/toml/test/table_arrays_easy.toml b/node_modules/toml/test/table_arrays_easy.toml
deleted file mode 100644
index ac3883bbc4ffbbd2a5f81bde14ba959f6c7bbb39..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/table_arrays_easy.toml
+++ /dev/null
@@ -1,10 +0,0 @@
-[[products]]
-name = "Hammer"
-sku = 738594937
-
-[[products]]
-
-[[products]]
-name = "Nail"
-sku = 284758393
-color = "gray"
diff --git a/node_modules/toml/test/table_arrays_hard.toml b/node_modules/toml/test/table_arrays_hard.toml
deleted file mode 100644
index 2ade5409a100e7c259c96633ab0926900d978460..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/table_arrays_hard.toml
+++ /dev/null
@@ -1,31 +0,0 @@
-[[fruit]]
-name = "durian"
-variety = []
-
-[[fruit]]
-name = "apple"
-
- [fruit.physical]
- color = "red"
- shape = "round"
-
- [[fruit.variety]]
- name = "red delicious"
-
- [[fruit.variety]]
- name = "granny smith"
-
-[[fruit]]
-
-[[fruit]]
-name = "banana"
-
- [[fruit.variety]]
- name = "plantain"
-
-[[fruit]]
-name = "orange"
-
-[fruit.physical]
-color = "orange"
-shape = "round"
diff --git a/node_modules/toml/test/test_toml.js b/node_modules/toml/test/test_toml.js
deleted file mode 100644
index adbdc4a690abd5e56bb71a42c0db833ba4e7a994..0000000000000000000000000000000000000000
--- a/node_modules/toml/test/test_toml.js
+++ /dev/null
@@ -1,586 +0,0 @@
-var toml = require('../');
-var fs = require('fs');
-
-var assert = require("nodeunit").assert;
-
-assert.parsesToml = function(tomlStr, expected) {
- try {
- var actual = toml.parse(tomlStr);
- } catch (e) {
- var errInfo = "line: " + e.line + ", column: " + e.column;
- return assert.fail("TOML parse error: " + e.message, errInfo, null, "at", assert.parsesToml);
- }
- return assert.deepEqual(actual, expected);
-};
-
-var exampleExpected = {
- title: "TOML Example",
- owner: {
- name: "Tom Preston-Werner",
- organization: "GitHub",
- bio: "GitHub Cofounder & CEO\n\tLikes \"tater tots\" and beer and backslashes: \\",
- dob: new Date("1979-05-27T07:32:00Z")
- },
- database: {
- server: "192.168.1.1",
- ports: [8001, 8001, 8003],
- connection_max: 5000,
- connection_min: -2,
- max_temp: 87.1,
- min_temp: -17.76,
- enabled: true
- },
- servers: {
- alpha: {
- ip: "10.0.0.1",
- dc: "eqdc10"
- },
- beta: {
- ip: "10.0.0.2",
- dc: "eqdc10"
- }
- },
- clients: {
- data: [ ["gamma", "delta"], [1, 2] ]
- }
-};
-
-var hardExampleExpected = {
- the: {
- hard: {
- another_test_string: ' Same thing, but with a string #',
- 'bit#': {
- multi_line_array: [']'],
- 'what?': "You don't think some user won't do that?"
- },
- harder_test_string: " And when \"'s are in the string, along with # \"",
- test_array: ['] ', ' # '],
- test_array2: ['Test #11 ]proved that', 'Experiment #9 was a success']
- },
- test_string: "You'll hate me after this - #"
- }
-};
-
-var easyTableArrayExpected = {
- "products": [
- { "name": "Hammer", "sku": 738594937 },
- { },
- { "name": "Nail", "sku": 284758393, "color": "gray" }
- ]
-};
-
-var hardTableArrayExpected = {
- "fruit": [
- {
- "name": "durian",
- "variety": []
- },
- {
- "name": "apple",
- "physical": {
- "color": "red",
- "shape": "round"
- },
- "variety": [
- { "name": "red delicious" },
- { "name": "granny smith" }
- ]
- },
- {},
- {
- "name": "banana",
- "variety": [
- { "name": "plantain" }
- ]
- },
- {
- "name": "orange",
- "physical": {
- "color": "orange",
- "shape": "round"
- }
- }
- ]
-}
-
-var badInputs = [
- '[error] if you didn\'t catch this, your parser is broken',
- 'string = "Anything other than tabs, spaces and newline after a table or key value pair has ended should produce an error unless it is a comment" like this',
- 'array = [\n \"This might most likely happen in multiline arrays\",\n Like here,\n \"or here,\n and here\"\n ] End of array comment, forgot the #',
- 'number = 3.14 pi <--again forgot the #'
-];
-
-exports.testParsesExample = function(test) {
- var str = fs.readFileSync(__dirname + "/example.toml", 'utf-8')
- test.parsesToml(str, exampleExpected);
- test.done();
-};
-
-exports.testParsesHardExample = function(test) {
- var str = fs.readFileSync(__dirname + "/hard_example.toml", 'utf-8')
- test.parsesToml(str, hardExampleExpected);
- test.done();
-};
-
-exports.testEasyTableArrays = function(test) {
- var str = fs.readFileSync(__dirname + "/table_arrays_easy.toml", 'utf8')
- test.parsesToml(str, easyTableArrayExpected);
- test.done();
-};
-
-exports.testHarderTableArrays = function(test) {
- var str = fs.readFileSync(__dirname + "/table_arrays_hard.toml", 'utf8')
- test.parsesToml(str, hardTableArrayExpected);
- test.done();
-};
-
-exports.testSupportsTrailingCommasInArrays = function(test) {
- var str = 'arr = [1, 2, 3,]';
- var expected = { arr: [1, 2, 3] };
- test.parsesToml(str, expected);
- test.done();
-};
-
-exports.testSingleElementArrayWithNoTrailingComma = function(test) {
- var str = "a = [1]";
- test.parsesToml(str, {
- a: [1]
- });
- test.done();
-};
-
-exports.testEmptyArray = function(test) {
- var str = "a = []";
- test.parsesToml(str, {
- a: []
- });
- test.done();
-};
-
-exports.testArrayWithWhitespace = function(test) {
- var str = "[versions]\nfiles = [\n 3, \n 5 \n\n ]";
- test.parsesToml(str, {
- versions: {
- files: [3, 5]
- }
- });
- test.done();
-};
-
-exports.testEmptyArrayWithWhitespace = function(test) {
- var str = "[versions]\nfiles = [\n \n ]";
- test.parsesToml(str, {
- versions: {
- files: []
- }
- });
- test.done();
-};
-
-exports.testDefineOnSuperkey = function(test) {
- var str = "[a.b]\nc = 1\n\n[a]\nd = 2";
- var expected = {
- a: {
- b: {
- c: 1
- },
- d: 2
- }
- };
- test.parsesToml(str, expected);
- test.done();
-};
-
-exports.testWhitespace = function(test) {
- var str = "a = 1\n \n b = 2 ";
- test.parsesToml(str, {
- a: 1, b: 2
- });
- test.done();
-};
-
-exports.testUnicode = function(test) {
- var str = "str = \"My name is Jos\\u00E9\"";
- test.parsesToml(str, {
- str: "My name is Jos\u00E9"
- });
-
- var str = "str = \"My name is Jos\\U000000E9\"";
- test.parsesToml(str, {
- str: "My name is Jos\u00E9"
- });
- test.done();
-};
-
-exports.testMultilineStrings = function(test) {
- var str = fs.readFileSync(__dirname + "/multiline_strings.toml", 'utf8');
- test.parsesToml(str, {
- key1: "One\nTwo",
- key2: "One\nTwo",
- key3: "One\nTwo"
- });
- test.done();
-};
-
-exports.testMultilineEatWhitespace = function(test) {
- var str = fs.readFileSync(__dirname + "/multiline_eat_whitespace.toml", 'utf8');
- test.parsesToml(str, {
- key1: "The quick brown fox jumps over the lazy dog.",
- key2: "The quick brown fox jumps over the lazy dog.",
- key3: "The quick brown fox jumps over the lazy dog."
- });
- test.done();
-};
-
-exports.testLiteralStrings = function(test) {
- var str = fs.readFileSync(__dirname + "/literal_strings.toml", 'utf8');
- test.parsesToml(str, {
- winpath: "C:\\Users\\nodejs\\templates",
- winpath2: "\\\\ServerX\\admin$\\system32\\",
- quoted: "Tom \"Dubs\" Preston-Werner",
- regex: "<\\i\\c*\\s*>"
- });
- test.done();
-};
-
-exports.testMultilineLiteralStrings = function(test) {
- var str = fs.readFileSync(__dirname + "/multiline_literal_strings.toml", 'utf8');
- test.parsesToml(str, {
- regex2: "I [dw]on't need \\d{2} apples",
- lines: "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n"
- });
- test.done();
-};
-
-exports.testIntegerFormats = function(test) {
- var str = "a = +99\nb = 42\nc = 0\nd = -17\ne = 1_000_001\nf = 1_2_3_4_5 # why u do dis";
- test.parsesToml(str, {
- a: 99,
- b: 42,
- c: 0,
- d: -17,
- e: 1000001,
- f: 12345
- });
- test.done();
-};
-
-exports.testFloatFormats = function(test) {
- var str = "a = +1.0\nb = 3.1415\nc = -0.01\n" +
- "d = 5e+22\ne = 1e6\nf = -2E-2\n" +
- "g = 6.626e-34\n" +
- "h = 9_224_617.445_991_228_313\n" +
- "i = 1e1_000";
- test.parsesToml(str, {
- a: 1.0,
- b: 3.1415,
- c: -0.01,
- d: 5e22,
- e: 1e6,
- f: -2e-2,
- g: 6.626e-34,
- h: 9224617.445991228313,
- i: 1e1000
- });
- test.done();
-};
-
-exports.testDate = function(test) {
- var date = new Date("1979-05-27T07:32:00Z");
- test.parsesToml("a = 1979-05-27T07:32:00Z", {
- a: date
- });
- test.done();
-};
-
-exports.testDateWithOffset = function(test) {
- var date1 = new Date("1979-05-27T07:32:00-07:00"),
- date2 = new Date("1979-05-27T07:32:00+02:00");
- test.parsesToml("a = 1979-05-27T07:32:00-07:00\nb = 1979-05-27T07:32:00+02:00", {
- a: date1,
- b: date2
- });
- test.done();
-};
-
-exports.testDateWithSecondFraction = function(test) {
- var date = new Date("1979-05-27T00:32:00.999999-07:00");
- test.parsesToml("a = 1979-05-27T00:32:00.999999-07:00", {
- a: date
- });
- test.done();
-};
-
-exports.testDateFromIsoString = function(test) {
- // https://github.com/BinaryMuse/toml-node/issues/20
- var date = new Date(),
- dateStr = date.toISOString(),
- tomlStr = "a = " + dateStr;
-
- test.parsesToml(tomlStr, {
- a: date
- });
- test.done();
-};
-
-exports.testLeadingNewlines = function(test) {
- // https://github.com/BinaryMuse/toml-node/issues/22
- var str = "\ntest = \"ing\"";
- test.parsesToml(str, {
- test: "ing"
- });
- test.done();
-};
-
-exports.testInlineTables = function(test) {
- var str = fs.readFileSync(__dirname + "/inline_tables.toml", 'utf8');
- test.parsesToml(str, {
- name: {
- first: "Tom",
- last: "Preston-Werner"
- },
- point: {
- x: 1,
- y: 2
- },
- nested: {
- x: {
- a: {
- b: 3
- }
- }
- },
- points: [
- { x: 1, y: 2, z: 3 },
- { x: 7, y: 8, z: 9 },
- { x: 2, y: 4, z: 8 }
- ],
- arrays: [
- { x: [1, 2, 3], y: [4, 5, 6] },
- { x: [7, 8, 9], y: [0, 1, 2] }
- ]
- });
- test.done();
-};
-
-exports.testEmptyInlineTables = function(test) {
- // https://github.com/BinaryMuse/toml-node/issues/24
- var str = "a = { }";
- test.parsesToml(str, {
- a: {}
- });
- test.done();
-};
-
-exports.testKeyNamesWithWhitespaceAroundStartAndFinish = function(test) {
- var str = "[ a ]\nb = 1";
- test.parsesToml(str, {
- a: {
- b: 1
- }
- });
- test.done();
-};
-
-exports.testKeyNamesWithWhitespaceAroundDots = function(test) {
- var str = "[ a . b . c]\nd = 1";
- test.parsesToml(str, {
- a: {
- b: {
- c: {
- d: 1
- }
- }
- }
- });
- test.done();
-};
-
-exports.testSimpleQuotedKeyNames = function(test) {
- var str = "[\"ʞ\"]\na = 1";
- test.parsesToml(str, {
- "ʞ": {
- a: 1
- }
- });
- test.done();
-};
-
-exports.testComplexQuotedKeyNames = function(test) {
- var str = "[ a . \"ʞ\" . c ]\nd = 1";
- test.parsesToml(str, {
- a: {
- "ʞ": {
- c: {
- d: 1
- }
- }
- }
- });
- test.done();
-};
-
-exports.testEscapedQuotesInQuotedKeyNames = function(test) {
- test.parsesToml("[\"the \\\"thing\\\"\"]\na = true", {
- 'the "thing"': {
- a: true
- }
- });
- test.done();
-};
-
-exports.testMoreComplexQuotedKeyNames = function(test) {
- // https://github.com/BinaryMuse/toml-node/issues/21
- test.parsesToml('["the\\ key"]\n\none = "one"\ntwo = 2\nthree = false', {
- "the\\ key": {
- one: "one",
- two: 2,
- three: false
- }
- });
- test.parsesToml('[a."the\\ key"]\n\none = "one"\ntwo = 2\nthree = false', {
- a: {
- "the\\ key": {
- one: "one",
- two: 2,
- three: false
- }
- }
- });
- test.parsesToml('[a."the-key"]\n\none = "one"\ntwo = 2\nthree = false', {
- a: {
- "the-key": {
- one: "one",
- two: 2,
- three: false
- }
- }
- });
- test.parsesToml('[a."the.key"]\n\none = "one"\ntwo = 2\nthree = false', {
- a: {
- "the.key": {
- one: "one",
- two: 2,
- three: false
- }
- }
- });
- // https://github.com/BinaryMuse/toml-node/issues/34
- test.parsesToml('[table]\n\'a "quoted value"\' = "value"', {
- table: {
- 'a "quoted value"': "value"
- }
- });
- // https://github.com/BinaryMuse/toml-node/issues/33
- test.parsesToml('[module]\n"foo=bar" = "zzz"', {
- module: {
- "foo=bar": "zzz"
- }
- });
-
- test.done();
-};
-
-exports.testErrorOnBadUnicode = function(test) {
- var str = "str = \"My name is Jos\\uD800\"";
- test.throws(function() {
- toml.parse(str);
- });
- test.done();
-};
-
-exports.testErrorOnDotAtStartOfKey = function(test) {
- test.throws(function() {
- var str = "[.a]\nb = 1";
- toml.parse(str);
- });
- test.done()
-};
-
-exports.testErrorOnDotAtEndOfKey = function(test) {
- test.throws(function() {
- var str = "[.a]\nb = 1";
- toml.parse(str);
- });
- test.done()
-};
-
-exports.testErrorOnTableOverride = function(test) {
- test.throws(function() {
- var str = "[a]\nb = 1\n\n[a]\nc = 2";
- toml.parse(str);
- });
- test.done()
-};
-
-exports.testErrorOnKeyOverride = function(test) {
- test.throws(function() {
- var str = "[a]\nb = 1\n[a.b]\nc = 2";
- toml.parse(str);
- });
- test.done()
-};
-
-exports.testErrorOnKeyOverrideWithNested = function(test) {
- // https://github.com/BinaryMuse/toml-node/issues/23
- test.throws(function() {
- var str = "[a]\nb = \"a\"\n[a.b.c]";
- toml.parse(str);
- }, "existing key 'a.b'");
- test.done();
-};
-
-exports.testErrorOnKeyOverrideWithArrayTable = function(test) {
- test.throws(function() {
- var str = "[a]\nb = 1\n[[a]]\nc = 2";
- toml.parse(str);
- });
- test.done()
-};
-
-exports.testErrorOnKeyReplace = function(test) {
- test.throws(function() {
- var str = "[a]\nb = 1\nb = 2";
- toml.parse(str);
- });
- test.done()
-};
-
-exports.testErrorOnInlineTableReplace = function(test) {
- // https://github.com/BinaryMuse/toml-node/issues/25
- test.throws(function() {
- var str = "a = { b = 1 }\n[a]\nc = 2";
- toml.parse(str);
- }, "existing key 'a'");
- test.done();
-};
-
-exports.testErrorOnArrayMismatch = function(test) {
- test.throws(function() {
- var str = 'data = [1, 2, "test"]'
- toml.parse(str);
- });
- test.done();
-};
-
-exports.testErrorOnBadInputs = function(test) {
- var count = 0;
- for (i in badInputs) {
- (function(num) {
- test.throws(function() {
- toml.parse(badInputs[num]);
- });
- })(i);
- }
- test.done();
-};
-
-exports.testErrorsHaveCorrectLineAndColumn = function(test) {
- var str = "[a]\nb = 1\n [a.b]\nc = 2";
- try { toml.parse(str); }
- catch (e) {
- test.equal(e.line, 3);
- test.equal(e.column, 2);
- test.done();
- }
-};
diff --git a/node_modules/typedarray/.travis.yml b/node_modules/typedarray/.travis.yml
deleted file mode 100644
index cc4dba29d959a2da7b97f9edd3c7c91384b2ee5b..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/.travis.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-language: node_js
-node_js:
- - "0.8"
- - "0.10"
diff --git a/node_modules/typedarray/LICENSE b/node_modules/typedarray/LICENSE
deleted file mode 100644
index 11adfaec9e7f95f3eab1ecdd6f1f8715fcdc4311..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/LICENSE
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- Copyright (c) 2010, Linden Research, Inc.
- Copyright (c) 2012, Joshua Bell
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
- $/LicenseInfo$
- */
-
-// Original can be found at:
-// https://bitbucket.org/lindenlab/llsd
-// Modifications by Joshua Bell inexorabletash@gmail.com
-// https://github.com/inexorabletash/polyfill
-
-// ES3/ES5 implementation of the Krhonos Typed Array Specification
-// Ref: http://www.khronos.org/registry/typedarray/specs/latest/
-// Date: 2011-02-01
-//
-// Variations:
-// * Allows typed_array.get/set() as alias for subscripts (typed_array[])
diff --git a/node_modules/typedarray/example/tarray.js b/node_modules/typedarray/example/tarray.js
deleted file mode 100644
index 8423d7c9b1c327e5c76744eccf7ec469b3ffdd79..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/example/tarray.js
+++ /dev/null
@@ -1,4 +0,0 @@
-var Uint8Array = require('../').Uint8Array;
-var ua = new Uint8Array(5);
-ua[1] = 256 + 55;
-console.log(ua[1]);
diff --git a/node_modules/typedarray/index.js b/node_modules/typedarray/index.js
deleted file mode 100644
index 5e540841f432413f874b4c75ffc7280f114c37eb..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/index.js
+++ /dev/null
@@ -1,630 +0,0 @@
-var undefined = (void 0); // Paranoia
-
-// Beyond this value, index getters/setters (i.e. array[0], array[1]) are so slow to
-// create, and consume so much memory, that the browser appears frozen.
-var MAX_ARRAY_LENGTH = 1e5;
-
-// Approximations of internal ECMAScript conversion functions
-var ECMAScript = (function() {
- // Stash a copy in case other scripts modify these
- var opts = Object.prototype.toString,
- ophop = Object.prototype.hasOwnProperty;
-
- return {
- // Class returns internal [[Class]] property, used to avoid cross-frame instanceof issues:
- Class: function(v) { return opts.call(v).replace(/^\[object *|\]$/g, ''); },
- HasProperty: function(o, p) { return p in o; },
- HasOwnProperty: function(o, p) { return ophop.call(o, p); },
- IsCallable: function(o) { return typeof o === 'function'; },
- ToInt32: function(v) { return v >> 0; },
- ToUint32: function(v) { return v >>> 0; }
- };
-}());
-
-// Snapshot intrinsics
-var LN2 = Math.LN2,
- abs = Math.abs,
- floor = Math.floor,
- log = Math.log,
- min = Math.min,
- pow = Math.pow,
- round = Math.round;
-
-// ES5: lock down object properties
-function configureProperties(obj) {
- if (getOwnPropNames && defineProp) {
- var props = getOwnPropNames(obj), i;
- for (i = 0; i < props.length; i += 1) {
- defineProp(obj, props[i], {
- value: obj[props[i]],
- writable: false,
- enumerable: false,
- configurable: false
- });
- }
- }
-}
-
-// emulate ES5 getter/setter API using legacy APIs
-// http://blogs.msdn.com/b/ie/archive/2010/09/07/transitioning-existing-code-to-the-es5-getter-setter-apis.aspx
-// (second clause tests for Object.defineProperty() in IE<9 that only supports extending DOM prototypes, but
-// note that IE<9 does not support __defineGetter__ or __defineSetter__ so it just renders the method harmless)
-var defineProp
-if (Object.defineProperty && (function() {
- try {
- Object.defineProperty({}, 'x', {});
- return true;
- } catch (e) {
- return false;
- }
- })()) {
- defineProp = Object.defineProperty;
-} else {
- defineProp = function(o, p, desc) {
- if (!o === Object(o)) throw new TypeError("Object.defineProperty called on non-object");
- if (ECMAScript.HasProperty(desc, 'get') && Object.prototype.__defineGetter__) { Object.prototype.__defineGetter__.call(o, p, desc.get); }
- if (ECMAScript.HasProperty(desc, 'set') && Object.prototype.__defineSetter__) { Object.prototype.__defineSetter__.call(o, p, desc.set); }
- if (ECMAScript.HasProperty(desc, 'value')) { o[p] = desc.value; }
- return o;
- };
-}
-
-var getOwnPropNames = Object.getOwnPropertyNames || function (o) {
- if (o !== Object(o)) throw new TypeError("Object.getOwnPropertyNames called on non-object");
- var props = [], p;
- for (p in o) {
- if (ECMAScript.HasOwnProperty(o, p)) {
- props.push(p);
- }
- }
- return props;
-};
-
-// ES5: Make obj[index] an alias for obj._getter(index)/obj._setter(index, value)
-// for index in 0 ... obj.length
-function makeArrayAccessors(obj) {
- if (!defineProp) { return; }
-
- if (obj.length > MAX_ARRAY_LENGTH) throw new RangeError("Array too large for polyfill");
-
- function makeArrayAccessor(index) {
- defineProp(obj, index, {
- 'get': function() { return obj._getter(index); },
- 'set': function(v) { obj._setter(index, v); },
- enumerable: true,
- configurable: false
- });
- }
-
- var i;
- for (i = 0; i < obj.length; i += 1) {
- makeArrayAccessor(i);
- }
-}
-
-// Internal conversion functions:
-// pack() - take a number (interpreted as Type), output a byte array
-// unpack() - take a byte array, output a Type-like number
-
-function as_signed(value, bits) { var s = 32 - bits; return (value << s) >> s; }
-function as_unsigned(value, bits) { var s = 32 - bits; return (value << s) >>> s; }
-
-function packI8(n) { return [n & 0xff]; }
-function unpackI8(bytes) { return as_signed(bytes[0], 8); }
-
-function packU8(n) { return [n & 0xff]; }
-function unpackU8(bytes) { return as_unsigned(bytes[0], 8); }
-
-function packU8Clamped(n) { n = round(Number(n)); return [n < 0 ? 0 : n > 0xff ? 0xff : n & 0xff]; }
-
-function packI16(n) { return [(n >> 8) & 0xff, n & 0xff]; }
-function unpackI16(bytes) { return as_signed(bytes[0] << 8 | bytes[1], 16); }
-
-function packU16(n) { return [(n >> 8) & 0xff, n & 0xff]; }
-function unpackU16(bytes) { return as_unsigned(bytes[0] << 8 | bytes[1], 16); }
-
-function packI32(n) { return [(n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff]; }
-function unpackI32(bytes) { return as_signed(bytes[0] << 24 | bytes[1] << 16 | bytes[2] << 8 | bytes[3], 32); }
-
-function packU32(n) { return [(n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff]; }
-function unpackU32(bytes) { return as_unsigned(bytes[0] << 24 | bytes[1] << 16 | bytes[2] << 8 | bytes[3], 32); }
-
-function packIEEE754(v, ebits, fbits) {
-
- var bias = (1 << (ebits - 1)) - 1,
- s, e, f, ln,
- i, bits, str, bytes;
-
- function roundToEven(n) {
- var w = floor(n), f = n - w;
- if (f < 0.5)
- return w;
- if (f > 0.5)
- return w + 1;
- return w % 2 ? w + 1 : w;
- }
-
- // Compute sign, exponent, fraction
- if (v !== v) {
- // NaN
- // http://dev.w3.org/2006/webapi/WebIDL/#es-type-mapping
- e = (1 << ebits) - 1; f = pow(2, fbits - 1); s = 0;
- } else if (v === Infinity || v === -Infinity) {
- e = (1 << ebits) - 1; f = 0; s = (v < 0) ? 1 : 0;
- } else if (v === 0) {
- e = 0; f = 0; s = (1 / v === -Infinity) ? 1 : 0;
- } else {
- s = v < 0;
- v = abs(v);
-
- if (v >= pow(2, 1 - bias)) {
- e = min(floor(log(v) / LN2), 1023);
- f = roundToEven(v / pow(2, e) * pow(2, fbits));
- if (f / pow(2, fbits) >= 2) {
- e = e + 1;
- f = 1;
- }
- if (e > bias) {
- // Overflow
- e = (1 << ebits) - 1;
- f = 0;
- } else {
- // Normalized
- e = e + bias;
- f = f - pow(2, fbits);
- }
- } else {
- // Denormalized
- e = 0;
- f = roundToEven(v / pow(2, 1 - bias - fbits));
- }
- }
-
- // Pack sign, exponent, fraction
- bits = [];
- for (i = fbits; i; i -= 1) { bits.push(f % 2 ? 1 : 0); f = floor(f / 2); }
- for (i = ebits; i; i -= 1) { bits.push(e % 2 ? 1 : 0); e = floor(e / 2); }
- bits.push(s ? 1 : 0);
- bits.reverse();
- str = bits.join('');
-
- // Bits to bytes
- bytes = [];
- while (str.length) {
- bytes.push(parseInt(str.substring(0, 8), 2));
- str = str.substring(8);
- }
- return bytes;
-}
-
-function unpackIEEE754(bytes, ebits, fbits) {
-
- // Bytes to bits
- var bits = [], i, j, b, str,
- bias, s, e, f;
-
- for (i = bytes.length; i; i -= 1) {
- b = bytes[i - 1];
- for (j = 8; j; j -= 1) {
- bits.push(b % 2 ? 1 : 0); b = b >> 1;
- }
- }
- bits.reverse();
- str = bits.join('');
-
- // Unpack sign, exponent, fraction
- bias = (1 << (ebits - 1)) - 1;
- s = parseInt(str.substring(0, 1), 2) ? -1 : 1;
- e = parseInt(str.substring(1, 1 + ebits), 2);
- f = parseInt(str.substring(1 + ebits), 2);
-
- // Produce number
- if (e === (1 << ebits) - 1) {
- return f !== 0 ? NaN : s * Infinity;
- } else if (e > 0) {
- // Normalized
- return s * pow(2, e - bias) * (1 + f / pow(2, fbits));
- } else if (f !== 0) {
- // Denormalized
- return s * pow(2, -(bias - 1)) * (f / pow(2, fbits));
- } else {
- return s < 0 ? -0 : 0;
- }
-}
-
-function unpackF64(b) { return unpackIEEE754(b, 11, 52); }
-function packF64(v) { return packIEEE754(v, 11, 52); }
-function unpackF32(b) { return unpackIEEE754(b, 8, 23); }
-function packF32(v) { return packIEEE754(v, 8, 23); }
-
-
-//
-// 3 The ArrayBuffer Type
-//
-
-(function() {
-
- /** @constructor */
- var ArrayBuffer = function ArrayBuffer(length) {
- length = ECMAScript.ToInt32(length);
- if (length < 0) throw new RangeError('ArrayBuffer size is not a small enough positive integer');
-
- this.byteLength = length;
- this._bytes = [];
- this._bytes.length = length;
-
- var i;
- for (i = 0; i < this.byteLength; i += 1) {
- this._bytes[i] = 0;
- }
-
- configureProperties(this);
- };
-
- exports.ArrayBuffer = exports.ArrayBuffer || ArrayBuffer;
-
- //
- // 4 The ArrayBufferView Type
- //
-
- // NOTE: this constructor is not exported
- /** @constructor */
- var ArrayBufferView = function ArrayBufferView() {
- //this.buffer = null;
- //this.byteOffset = 0;
- //this.byteLength = 0;
- };
-
- //
- // 5 The Typed Array View Types
- //
-
- function makeConstructor(bytesPerElement, pack, unpack) {
- // Each TypedArray type requires a distinct constructor instance with
- // identical logic, which this produces.
-
- var ctor;
- ctor = function(buffer, byteOffset, length) {
- var array, sequence, i, s;
-
- if (!arguments.length || typeof arguments[0] === 'number') {
- // Constructor(unsigned long length)
- this.length = ECMAScript.ToInt32(arguments[0]);
- if (length < 0) throw new RangeError('ArrayBufferView size is not a small enough positive integer');
-
- this.byteLength = this.length * this.BYTES_PER_ELEMENT;
- this.buffer = new ArrayBuffer(this.byteLength);
- this.byteOffset = 0;
- } else if (typeof arguments[0] === 'object' && arguments[0].constructor === ctor) {
- // Constructor(TypedArray array)
- array = arguments[0];
-
- this.length = array.length;
- this.byteLength = this.length * this.BYTES_PER_ELEMENT;
- this.buffer = new ArrayBuffer(this.byteLength);
- this.byteOffset = 0;
-
- for (i = 0; i < this.length; i += 1) {
- this._setter(i, array._getter(i));
- }
- } else if (typeof arguments[0] === 'object' &&
- !(arguments[0] instanceof ArrayBuffer || ECMAScript.Class(arguments[0]) === 'ArrayBuffer')) {
- // Constructor(sequence array)
- sequence = arguments[0];
-
- this.length = ECMAScript.ToUint32(sequence.length);
- this.byteLength = this.length * this.BYTES_PER_ELEMENT;
- this.buffer = new ArrayBuffer(this.byteLength);
- this.byteOffset = 0;
-
- for (i = 0; i < this.length; i += 1) {
- s = sequence[i];
- this._setter(i, Number(s));
- }
- } else if (typeof arguments[0] === 'object' &&
- (arguments[0] instanceof ArrayBuffer || ECMAScript.Class(arguments[0]) === 'ArrayBuffer')) {
- // Constructor(ArrayBuffer buffer,
- // optional unsigned long byteOffset, optional unsigned long length)
- this.buffer = buffer;
-
- this.byteOffset = ECMAScript.ToUint32(byteOffset);
- if (this.byteOffset > this.buffer.byteLength) {
- throw new RangeError("byteOffset out of range");
- }
-
- if (this.byteOffset % this.BYTES_PER_ELEMENT) {
- // The given byteOffset must be a multiple of the element
- // size of the specific type, otherwise an exception is raised.
- throw new RangeError("ArrayBuffer length minus the byteOffset is not a multiple of the element size.");
- }
-
- if (arguments.length < 3) {
- this.byteLength = this.buffer.byteLength - this.byteOffset;
-
- if (this.byteLength % this.BYTES_PER_ELEMENT) {
- throw new RangeError("length of buffer minus byteOffset not a multiple of the element size");
- }
- this.length = this.byteLength / this.BYTES_PER_ELEMENT;
- } else {
- this.length = ECMAScript.ToUint32(length);
- this.byteLength = this.length * this.BYTES_PER_ELEMENT;
- }
-
- if ((this.byteOffset + this.byteLength) > this.buffer.byteLength) {
- throw new RangeError("byteOffset and length reference an area beyond the end of the buffer");
- }
- } else {
- throw new TypeError("Unexpected argument type(s)");
- }
-
- this.constructor = ctor;
-
- configureProperties(this);
- makeArrayAccessors(this);
- };
-
- ctor.prototype = new ArrayBufferView();
- ctor.prototype.BYTES_PER_ELEMENT = bytesPerElement;
- ctor.prototype._pack = pack;
- ctor.prototype._unpack = unpack;
- ctor.BYTES_PER_ELEMENT = bytesPerElement;
-
- // getter type (unsigned long index);
- ctor.prototype._getter = function(index) {
- if (arguments.length < 1) throw new SyntaxError("Not enough arguments");
-
- index = ECMAScript.ToUint32(index);
- if (index >= this.length) {
- return undefined;
- }
-
- var bytes = [], i, o;
- for (i = 0, o = this.byteOffset + index * this.BYTES_PER_ELEMENT;
- i < this.BYTES_PER_ELEMENT;
- i += 1, o += 1) {
- bytes.push(this.buffer._bytes[o]);
- }
- return this._unpack(bytes);
- };
-
- // NONSTANDARD: convenience alias for getter: type get(unsigned long index);
- ctor.prototype.get = ctor.prototype._getter;
-
- // setter void (unsigned long index, type value);
- ctor.prototype._setter = function(index, value) {
- if (arguments.length < 2) throw new SyntaxError("Not enough arguments");
-
- index = ECMAScript.ToUint32(index);
- if (index >= this.length) {
- return undefined;
- }
-
- var bytes = this._pack(value), i, o;
- for (i = 0, o = this.byteOffset + index * this.BYTES_PER_ELEMENT;
- i < this.BYTES_PER_ELEMENT;
- i += 1, o += 1) {
- this.buffer._bytes[o] = bytes[i];
- }
- };
-
- // void set(TypedArray array, optional unsigned long offset);
- // void set(sequence array, optional unsigned long offset);
- ctor.prototype.set = function(index, value) {
- if (arguments.length < 1) throw new SyntaxError("Not enough arguments");
- var array, sequence, offset, len,
- i, s, d,
- byteOffset, byteLength, tmp;
-
- if (typeof arguments[0] === 'object' && arguments[0].constructor === this.constructor) {
- // void set(TypedArray array, optional unsigned long offset);
- array = arguments[0];
- offset = ECMAScript.ToUint32(arguments[1]);
-
- if (offset + array.length > this.length) {
- throw new RangeError("Offset plus length of array is out of range");
- }
-
- byteOffset = this.byteOffset + offset * this.BYTES_PER_ELEMENT;
- byteLength = array.length * this.BYTES_PER_ELEMENT;
-
- if (array.buffer === this.buffer) {
- tmp = [];
- for (i = 0, s = array.byteOffset; i < byteLength; i += 1, s += 1) {
- tmp[i] = array.buffer._bytes[s];
- }
- for (i = 0, d = byteOffset; i < byteLength; i += 1, d += 1) {
- this.buffer._bytes[d] = tmp[i];
- }
- } else {
- for (i = 0, s = array.byteOffset, d = byteOffset;
- i < byteLength; i += 1, s += 1, d += 1) {
- this.buffer._bytes[d] = array.buffer._bytes[s];
- }
- }
- } else if (typeof arguments[0] === 'object' && typeof arguments[0].length !== 'undefined') {
- // void set(sequence array, optional unsigned long offset);
- sequence = arguments[0];
- len = ECMAScript.ToUint32(sequence.length);
- offset = ECMAScript.ToUint32(arguments[1]);
-
- if (offset + len > this.length) {
- throw new RangeError("Offset plus length of array is out of range");
- }
-
- for (i = 0; i < len; i += 1) {
- s = sequence[i];
- this._setter(offset + i, Number(s));
- }
- } else {
- throw new TypeError("Unexpected argument type(s)");
- }
- };
-
- // TypedArray subarray(long begin, optional long end);
- ctor.prototype.subarray = function(start, end) {
- function clamp(v, min, max) { return v < min ? min : v > max ? max : v; }
-
- start = ECMAScript.ToInt32(start);
- end = ECMAScript.ToInt32(end);
-
- if (arguments.length < 1) { start = 0; }
- if (arguments.length < 2) { end = this.length; }
-
- if (start < 0) { start = this.length + start; }
- if (end < 0) { end = this.length + end; }
-
- start = clamp(start, 0, this.length);
- end = clamp(end, 0, this.length);
-
- var len = end - start;
- if (len < 0) {
- len = 0;
- }
-
- return new this.constructor(
- this.buffer, this.byteOffset + start * this.BYTES_PER_ELEMENT, len);
- };
-
- return ctor;
- }
-
- var Int8Array = makeConstructor(1, packI8, unpackI8);
- var Uint8Array = makeConstructor(1, packU8, unpackU8);
- var Uint8ClampedArray = makeConstructor(1, packU8Clamped, unpackU8);
- var Int16Array = makeConstructor(2, packI16, unpackI16);
- var Uint16Array = makeConstructor(2, packU16, unpackU16);
- var Int32Array = makeConstructor(4, packI32, unpackI32);
- var Uint32Array = makeConstructor(4, packU32, unpackU32);
- var Float32Array = makeConstructor(4, packF32, unpackF32);
- var Float64Array = makeConstructor(8, packF64, unpackF64);
-
- exports.Int8Array = exports.Int8Array || Int8Array;
- exports.Uint8Array = exports.Uint8Array || Uint8Array;
- exports.Uint8ClampedArray = exports.Uint8ClampedArray || Uint8ClampedArray;
- exports.Int16Array = exports.Int16Array || Int16Array;
- exports.Uint16Array = exports.Uint16Array || Uint16Array;
- exports.Int32Array = exports.Int32Array || Int32Array;
- exports.Uint32Array = exports.Uint32Array || Uint32Array;
- exports.Float32Array = exports.Float32Array || Float32Array;
- exports.Float64Array = exports.Float64Array || Float64Array;
-}());
-
-//
-// 6 The DataView View Type
-//
-
-(function() {
- function r(array, index) {
- return ECMAScript.IsCallable(array.get) ? array.get(index) : array[index];
- }
-
- var IS_BIG_ENDIAN = (function() {
- var u16array = new(exports.Uint16Array)([0x1234]),
- u8array = new(exports.Uint8Array)(u16array.buffer);
- return r(u8array, 0) === 0x12;
- }());
-
- // Constructor(ArrayBuffer buffer,
- // optional unsigned long byteOffset,
- // optional unsigned long byteLength)
- /** @constructor */
- var DataView = function DataView(buffer, byteOffset, byteLength) {
- if (arguments.length === 0) {
- buffer = new exports.ArrayBuffer(0);
- } else if (!(buffer instanceof exports.ArrayBuffer || ECMAScript.Class(buffer) === 'ArrayBuffer')) {
- throw new TypeError("TypeError");
- }
-
- this.buffer = buffer || new exports.ArrayBuffer(0);
-
- this.byteOffset = ECMAScript.ToUint32(byteOffset);
- if (this.byteOffset > this.buffer.byteLength) {
- throw new RangeError("byteOffset out of range");
- }
-
- if (arguments.length < 3) {
- this.byteLength = this.buffer.byteLength - this.byteOffset;
- } else {
- this.byteLength = ECMAScript.ToUint32(byteLength);
- }
-
- if ((this.byteOffset + this.byteLength) > this.buffer.byteLength) {
- throw new RangeError("byteOffset and length reference an area beyond the end of the buffer");
- }
-
- configureProperties(this);
- };
-
- function makeGetter(arrayType) {
- return function(byteOffset, littleEndian) {
-
- byteOffset = ECMAScript.ToUint32(byteOffset);
-
- if (byteOffset + arrayType.BYTES_PER_ELEMENT > this.byteLength) {
- throw new RangeError("Array index out of range");
- }
- byteOffset += this.byteOffset;
-
- var uint8Array = new exports.Uint8Array(this.buffer, byteOffset, arrayType.BYTES_PER_ELEMENT),
- bytes = [], i;
- for (i = 0; i < arrayType.BYTES_PER_ELEMENT; i += 1) {
- bytes.push(r(uint8Array, i));
- }
-
- if (Boolean(littleEndian) === Boolean(IS_BIG_ENDIAN)) {
- bytes.reverse();
- }
-
- return r(new arrayType(new exports.Uint8Array(bytes).buffer), 0);
- };
- }
-
- DataView.prototype.getUint8 = makeGetter(exports.Uint8Array);
- DataView.prototype.getInt8 = makeGetter(exports.Int8Array);
- DataView.prototype.getUint16 = makeGetter(exports.Uint16Array);
- DataView.prototype.getInt16 = makeGetter(exports.Int16Array);
- DataView.prototype.getUint32 = makeGetter(exports.Uint32Array);
- DataView.prototype.getInt32 = makeGetter(exports.Int32Array);
- DataView.prototype.getFloat32 = makeGetter(exports.Float32Array);
- DataView.prototype.getFloat64 = makeGetter(exports.Float64Array);
-
- function makeSetter(arrayType) {
- return function(byteOffset, value, littleEndian) {
-
- byteOffset = ECMAScript.ToUint32(byteOffset);
- if (byteOffset + arrayType.BYTES_PER_ELEMENT > this.byteLength) {
- throw new RangeError("Array index out of range");
- }
-
- // Get bytes
- var typeArray = new arrayType([value]),
- byteArray = new exports.Uint8Array(typeArray.buffer),
- bytes = [], i, byteView;
-
- for (i = 0; i < arrayType.BYTES_PER_ELEMENT; i += 1) {
- bytes.push(r(byteArray, i));
- }
-
- // Flip if necessary
- if (Boolean(littleEndian) === Boolean(IS_BIG_ENDIAN)) {
- bytes.reverse();
- }
-
- // Write them
- byteView = new exports.Uint8Array(this.buffer, byteOffset, arrayType.BYTES_PER_ELEMENT);
- byteView.set(bytes);
- };
- }
-
- DataView.prototype.setUint8 = makeSetter(exports.Uint8Array);
- DataView.prototype.setInt8 = makeSetter(exports.Int8Array);
- DataView.prototype.setUint16 = makeSetter(exports.Uint16Array);
- DataView.prototype.setInt16 = makeSetter(exports.Int16Array);
- DataView.prototype.setUint32 = makeSetter(exports.Uint32Array);
- DataView.prototype.setInt32 = makeSetter(exports.Int32Array);
- DataView.prototype.setFloat32 = makeSetter(exports.Float32Array);
- DataView.prototype.setFloat64 = makeSetter(exports.Float64Array);
-
- exports.DataView = exports.DataView || DataView;
-
-}());
diff --git a/node_modules/typedarray/package.json b/node_modules/typedarray/package.json
deleted file mode 100644
index a3d2c0113e050f7e64075b4aefffb49275591c48..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/package.json
+++ /dev/null
@@ -1,112 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "typedarray",
- "raw": "typedarray@^0.0.6",
- "rawSpec": "^0.0.6",
- "scope": null,
- "spec": ">=0.0.6 <0.0.7",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/concat-stream"
- ]
- ],
- "_from": "typedarray@>=0.0.6 <0.0.7",
- "_id": "typedarray@0.0.6",
- "_inCache": true,
- "_installable": true,
- "_location": "/typedarray",
- "_npmUser": {
- "email": "mail@substack.net",
- "name": "substack"
- },
- "_npmVersion": "1.4.3",
- "_phantomChildren": {},
- "_requested": {
- "name": "typedarray",
- "raw": "typedarray@^0.0.6",
- "rawSpec": "^0.0.6",
- "scope": null,
- "spec": ">=0.0.6 <0.0.7",
- "type": "range"
- },
- "_requiredBy": [
- "/concat-stream"
- ],
- "_resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
- "_shasum": "867ac74e3864187b1d3d47d996a78ec5c8830777",
- "_shrinkwrap": null,
- "_spec": "typedarray@^0.0.6",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/concat-stream",
- "author": {
- "email": "mail@substack.net",
- "name": "James Halliday",
- "url": "http://substack.net"
- },
- "bugs": {
- "url": "https://github.com/substack/typedarray/issues"
- },
- "dependencies": {},
- "description": "TypedArray polyfill for old browsers",
- "devDependencies": {
- "tape": "~2.3.2"
- },
- "directories": {},
- "dist": {
- "shasum": "867ac74e3864187b1d3d47d996a78ec5c8830777",
- "tarball": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz"
- },
- "homepage": "https://github.com/substack/typedarray",
- "keywords": [
- "ArrayBuffer",
- "DataView",
- "Float32Array",
- "Float64Array",
- "Int8Array",
- "Int16Array",
- "Int32Array",
- "Uint8Array",
- "Uint8ClampedArray",
- "Uint16Array",
- "Uint32Array",
- "typed",
- "array",
- "polyfill"
- ],
- "license": "MIT",
- "main": "index.js",
- "maintainers": [
- {
- "email": "mail@substack.net",
- "name": "substack"
- }
- ],
- "name": "typedarray",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git://github.com/substack/typedarray.git"
- },
- "scripts": {
- "test": "tape test/*.js test/server/*.js"
- },
- "testling": {
- "browsers": [
- "ie/6..latest",
- "firefox/16..latest",
- "firefox/nightly",
- "chrome/22..latest",
- "chrome/canary",
- "opera/12..latest",
- "opera/next",
- "safari/5.1..latest",
- "ipad/6.0..latest",
- "iphone/6.0..latest",
- "android-browser/4.2..latest"
- ],
- "files": "test/*.js"
- },
- "version": "0.0.6"
-}
diff --git a/node_modules/typedarray/readme.markdown b/node_modules/typedarray/readme.markdown
deleted file mode 100644
index d18f6f7197e6a5d852c592ba7539d58e9e4ea729..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/readme.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
-# typedarray
-
-TypedArray polyfill ripped from [this
-module](https://raw.github.com/inexorabletash/polyfill).
-
-[![build status](https://secure.travis-ci.org/substack/typedarray.png)](http://travis-ci.org/substack/typedarray)
-
-[![testling badge](https://ci.testling.com/substack/typedarray.png)](https://ci.testling.com/substack/typedarray)
-
-# example
-
-``` js
-var Uint8Array = require('typedarray').Uint8Array;
-var ua = new Uint8Array(5);
-ua[1] = 256 + 55;
-console.log(ua[1]);
-```
-
-output:
-
-```
-55
-```
-
-# methods
-
-``` js
-var TA = require('typedarray')
-```
-
-The `TA` object has the following constructors:
-
-* TA.ArrayBuffer
-* TA.DataView
-* TA.Float32Array
-* TA.Float64Array
-* TA.Int8Array
-* TA.Int16Array
-* TA.Int32Array
-* TA.Uint8Array
-* TA.Uint8ClampedArray
-* TA.Uint16Array
-* TA.Uint32Array
-
-# install
-
-With [npm](https://npmjs.org) do:
-
-```
-npm install typedarray
-```
-
-To use this module in the browser, compile with
-[browserify](http://browserify.org)
-or download a UMD build from browserify CDN:
-
-http://wzrd.in/standalone/typedarray@latest
-
-# license
-
-MIT
diff --git a/node_modules/typedarray/test/server/undef_globals.js b/node_modules/typedarray/test/server/undef_globals.js
deleted file mode 100644
index 425950f9fc9ed7c09d78c749f27014cfdf4a84d3..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/test/server/undef_globals.js
+++ /dev/null
@@ -1,19 +0,0 @@
-var test = require('tape');
-var vm = require('vm');
-var fs = require('fs');
-var src = fs.readFileSync(__dirname + '/../../index.js', 'utf8');
-
-test('u8a without globals', function (t) {
- var c = {
- module: { exports: {} },
- };
- c.exports = c.module.exports;
- vm.runInNewContext(src, c);
- var TA = c.module.exports;
- var ua = new(TA.Uint8Array)(5);
-
- t.equal(ua.length, 5);
- ua[1] = 256 + 55;
- t.equal(ua[1], 55);
- t.end();
-});
diff --git a/node_modules/typedarray/test/tarray.js b/node_modules/typedarray/test/tarray.js
deleted file mode 100644
index df596a34f23c0ef931cd5b41139985b8d23e8e2f..0000000000000000000000000000000000000000
--- a/node_modules/typedarray/test/tarray.js
+++ /dev/null
@@ -1,10 +0,0 @@
-var TA = require('../');
-var test = require('tape');
-
-test('tiny u8a test', function (t) {
- var ua = new(TA.Uint8Array)(5);
- t.equal(ua.length, 5);
- ua[1] = 256 + 55;
- t.equal(ua[1], 55);
- t.end();
-});
diff --git a/node_modules/util-deprecate/History.md b/node_modules/util-deprecate/History.md
deleted file mode 100644
index acc8675372e980824723cfcfec09c0ba43a3195a..0000000000000000000000000000000000000000
--- a/node_modules/util-deprecate/History.md
+++ /dev/null
@@ -1,16 +0,0 @@
-
-1.0.2 / 2015-10-07
-==================
-
- * use try/catch when checking `localStorage` (#3, @kumavis)
-
-1.0.1 / 2014-11-25
-==================
-
- * browser: use `console.warn()` for deprecation calls
- * browser: more jsdocs
-
-1.0.0 / 2014-04-30
-==================
-
- * initial commit
diff --git a/node_modules/util-deprecate/LICENSE b/node_modules/util-deprecate/LICENSE
deleted file mode 100644
index 6a60e8c225c9baca25907f87c74b428e5d85de0c..0000000000000000000000000000000000000000
--- a/node_modules/util-deprecate/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2014 Nathan Rajlich
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
diff --git a/node_modules/util-deprecate/README.md b/node_modules/util-deprecate/README.md
deleted file mode 100644
index 75622fa7c250a6605f4778d9dffe97bf60291d17..0000000000000000000000000000000000000000
--- a/node_modules/util-deprecate/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-util-deprecate
-==============
-### The Node.js `util.deprecate()` function with browser support
-
-In Node.js, this module simply re-exports the `util.deprecate()` function.
-
-In the web browser (i.e. via browserify), a browser-specific implementation
-of the `util.deprecate()` function is used.
-
-
-## API
-
-A `deprecate()` function is the only thing exposed by this module.
-
-``` javascript
-// setup:
-exports.foo = deprecate(foo, 'foo() is deprecated, use bar() instead');
-
-
-// users see:
-foo();
-// foo() is deprecated, use bar() instead
-foo();
-foo();
-```
-
-
-## License
-
-(The MIT License)
-
-Copyright (c) 2014 Nathan Rajlich
-
-Permission is hereby granted, free of charge, to any person
-obtaining a copy of this software and associated documentation
-files (the "Software"), to deal in the Software without
-restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the
-Software is furnished to do so, subject to the following
-conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-OTHER DEALINGS IN THE SOFTWARE.
diff --git a/node_modules/util-deprecate/browser.js b/node_modules/util-deprecate/browser.js
deleted file mode 100644
index 549ae2f065ea5add2c4b3667e412a9d0e7d2b1af..0000000000000000000000000000000000000000
--- a/node_modules/util-deprecate/browser.js
+++ /dev/null
@@ -1,67 +0,0 @@
-
-/**
- * Module exports.
- */
-
-module.exports = deprecate;
-
-/**
- * Mark that a method should not be used.
- * Returns a modified function which warns once by default.
- *
- * If `localStorage.noDeprecation = true` is set, then it is a no-op.
- *
- * If `localStorage.throwDeprecation = true` is set, then deprecated functions
- * will throw an Error when invoked.
- *
- * If `localStorage.traceDeprecation = true` is set, then deprecated functions
- * will invoke `console.trace()` instead of `console.error()`.
- *
- * @param {Function} fn - the function to deprecate
- * @param {String} msg - the string to print to the console when `fn` is invoked
- * @returns {Function} a new "deprecated" version of `fn`
- * @api public
- */
-
-function deprecate (fn, msg) {
- if (config('noDeprecation')) {
- return fn;
- }
-
- var warned = false;
- function deprecated() {
- if (!warned) {
- if (config('throwDeprecation')) {
- throw new Error(msg);
- } else if (config('traceDeprecation')) {
- console.trace(msg);
- } else {
- console.warn(msg);
- }
- warned = true;
- }
- return fn.apply(this, arguments);
- }
-
- return deprecated;
-}
-
-/**
- * Checks `localStorage` for boolean values for the given `name`.
- *
- * @param {String} name
- * @returns {Boolean}
- * @api private
- */
-
-function config (name) {
- // accessing global.localStorage can trigger a DOMException in sandboxed iframes
- try {
- if (!global.localStorage) return false;
- } catch (_) {
- return false;
- }
- var val = global.localStorage[name];
- if (null == val) return false;
- return String(val).toLowerCase() === 'true';
-}
diff --git a/node_modules/util-deprecate/node.js b/node_modules/util-deprecate/node.js
deleted file mode 100644
index 5e6fcff5ddd3fbf8bdda6310c224114d30b7509e..0000000000000000000000000000000000000000
--- a/node_modules/util-deprecate/node.js
+++ /dev/null
@@ -1,6 +0,0 @@
-
-/**
- * For Node.js, simply re-export the core `util.deprecate` function.
- */
-
-module.exports = require('util').deprecate;
diff --git a/node_modules/util-deprecate/package.json b/node_modules/util-deprecate/package.json
deleted file mode 100644
index bb6a94352884ea00baceb33ba2cfa7dec156e6ee..0000000000000000000000000000000000000000
--- a/node_modules/util-deprecate/package.json
+++ /dev/null
@@ -1,88 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "util-deprecate",
- "raw": "util-deprecate@^1.0.1",
- "rawSpec": "^1.0.1",
- "scope": null,
- "spec": ">=1.0.1 <2.0.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/readable-stream"
- ]
- ],
- "_from": "util-deprecate@>=1.0.1 <2.0.0",
- "_id": "util-deprecate@1.0.2",
- "_inCache": true,
- "_installable": true,
- "_location": "/util-deprecate",
- "_nodeVersion": "4.1.2",
- "_npmUser": {
- "email": "nathan@tootallnate.net",
- "name": "tootallnate"
- },
- "_npmVersion": "2.14.4",
- "_phantomChildren": {},
- "_requested": {
- "name": "util-deprecate",
- "raw": "util-deprecate@^1.0.1",
- "rawSpec": "^1.0.1",
- "scope": null,
- "spec": ">=1.0.1 <2.0.0",
- "type": "range"
- },
- "_requiredBy": [
- "/readable-stream"
- ],
- "_resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
- "_shasum": "450d4dc9fa70de732762fbd2d4a28981419a0ccf",
- "_shrinkwrap": null,
- "_spec": "util-deprecate@^1.0.1",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/readable-stream",
- "author": {
- "email": "nathan@tootallnate.net",
- "name": "Nathan Rajlich",
- "url": "http://n8.io/"
- },
- "browser": "browser.js",
- "bugs": {
- "url": "https://github.com/TooTallNate/util-deprecate/issues"
- },
- "dependencies": {},
- "description": "The Node.js `util.deprecate()` function with browser support",
- "devDependencies": {},
- "directories": {},
- "dist": {
- "shasum": "450d4dc9fa70de732762fbd2d4a28981419a0ccf",
- "tarball": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz"
- },
- "gitHead": "475fb6857cd23fafff20c1be846c1350abf8e6d4",
- "homepage": "https://github.com/TooTallNate/util-deprecate",
- "keywords": [
- "util",
- "deprecate",
- "browserify",
- "browser",
- "node"
- ],
- "license": "MIT",
- "main": "node.js",
- "maintainers": [
- {
- "email": "nathan@tootallnate.net",
- "name": "tootallnate"
- }
- ],
- "name": "util-deprecate",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git://github.com/TooTallNate/util-deprecate.git"
- },
- "scripts": {
- "test": "echo \"Error: no test specified\" && exit 1"
- },
- "version": "1.0.2"
-}
diff --git a/node_modules/xtend/.jshintrc b/node_modules/xtend/.jshintrc
deleted file mode 100644
index 77887b5f0f2efc24bd55430cb6f95f8a0cad89d8..0000000000000000000000000000000000000000
--- a/node_modules/xtend/.jshintrc
+++ /dev/null
@@ -1,30 +0,0 @@
-{
- "maxdepth": 4,
- "maxstatements": 200,
- "maxcomplexity": 12,
- "maxlen": 80,
- "maxparams": 5,
-
- "curly": true,
- "eqeqeq": true,
- "immed": true,
- "latedef": false,
- "noarg": true,
- "noempty": true,
- "nonew": true,
- "undef": true,
- "unused": "vars",
- "trailing": true,
-
- "quotmark": true,
- "expr": true,
- "asi": true,
-
- "browser": false,
- "esnext": true,
- "devel": false,
- "node": false,
- "nonstandard": false,
-
- "predef": ["require", "module", "__dirname", "__filename"]
-}
diff --git a/node_modules/xtend/LICENSE b/node_modules/xtend/LICENSE
deleted file mode 100644
index 0099f4f6c77f40ac409076408ad07449ffe246d3..0000000000000000000000000000000000000000
--- a/node_modules/xtend/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-Copyright (c) 2012-2014 Raynos.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/node_modules/xtend/README.md b/node_modules/xtend/README.md
deleted file mode 100644
index 4a2703cff276b155e89c3abb7b09fbdfe90273f4..0000000000000000000000000000000000000000
--- a/node_modules/xtend/README.md
+++ /dev/null
@@ -1,32 +0,0 @@
-# xtend
-
-[![browser support][3]][4]
-
-[![locked](http://badges.github.io/stability-badges/dist/locked.svg)](http://github.com/badges/stability-badges)
-
-Extend like a boss
-
-xtend is a basic utility library which allows you to extend an object by appending all of the properties from each object in a list. When there are identical properties, the right-most property takes precedence.
-
-## Examples
-
-```js
-var extend = require("xtend")
-
-// extend returns a new object. Does not mutate arguments
-var combination = extend({
- a: "a",
- b: "c"
-}, {
- b: "b"
-})
-// { a: "a", b: "b" }
-```
-
-## Stability status: Locked
-
-## MIT Licensed
-
-
- [3]: http://ci.testling.com/Raynos/xtend.png
- [4]: http://ci.testling.com/Raynos/xtend
diff --git a/node_modules/xtend/immutable.js b/node_modules/xtend/immutable.js
deleted file mode 100644
index 94889c9de11a181cec153de1713c8ae14ae4cb43..0000000000000000000000000000000000000000
--- a/node_modules/xtend/immutable.js
+++ /dev/null
@@ -1,19 +0,0 @@
-module.exports = extend
-
-var hasOwnProperty = Object.prototype.hasOwnProperty;
-
-function extend() {
- var target = {}
-
- for (var i = 0; i < arguments.length; i++) {
- var source = arguments[i]
-
- for (var key in source) {
- if (hasOwnProperty.call(source, key)) {
- target[key] = source[key]
- }
- }
- }
-
- return target
-}
diff --git a/node_modules/xtend/mutable.js b/node_modules/xtend/mutable.js
deleted file mode 100644
index 72debede6ca58592fe93b5ab22d434311a76861b..0000000000000000000000000000000000000000
--- a/node_modules/xtend/mutable.js
+++ /dev/null
@@ -1,17 +0,0 @@
-module.exports = extend
-
-var hasOwnProperty = Object.prototype.hasOwnProperty;
-
-function extend(target) {
- for (var i = 1; i < arguments.length; i++) {
- var source = arguments[i]
-
- for (var key in source) {
- if (hasOwnProperty.call(source, key)) {
- target[key] = source[key]
- }
- }
- }
-
- return target
-}
diff --git a/node_modules/xtend/package.json b/node_modules/xtend/package.json
deleted file mode 100644
index cf5b3673ebb7d360be99b72fa8d32e1a1d3ed78e..0000000000000000000000000000000000000000
--- a/node_modules/xtend/package.json
+++ /dev/null
@@ -1,125 +0,0 @@
-{
- "_args": [
- [
- {
- "name": "xtend",
- "raw": "xtend@~4.0.1",
- "rawSpec": "~4.0.1",
- "scope": null,
- "spec": ">=4.0.1 <4.1.0",
- "type": "range"
- },
- "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2"
- ]
- ],
- "_from": "xtend@>=4.0.1 <4.1.0",
- "_hasShrinkwrap": false,
- "_id": "xtend@4.0.2",
- "_inCache": true,
- "_installable": true,
- "_location": "/xtend",
- "_nodeVersion": "10.15.3",
- "_npmOperationalInternal": {
- "host": "s3://npm-registry-packages",
- "tmp": "tmp/xtend_4.0.2_1562592945262_0.1667332210531911"
- },
- "_npmUser": {
- "email": "raynos2@gmail.com",
- "name": "raynos"
- },
- "_npmVersion": "6.9.2",
- "_phantomChildren": {},
- "_requested": {
- "name": "xtend",
- "raw": "xtend@~4.0.1",
- "rawSpec": "~4.0.1",
- "scope": null,
- "spec": ">=4.0.1 <4.1.0",
- "type": "range"
- },
- "_requiredBy": [
- "/through2"
- ],
- "_resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
- "_shasum": "bb72779f5fa465186b1f438f674fa347fdb5db54",
- "_shrinkwrap": null,
- "_spec": "xtend@~4.0.1",
- "_where": "/Users/xxm/Documents/gitlab/codechina-docs/node_modules/through2",
- "author": {
- "email": "raynos2@gmail.com",
- "name": "Raynos"
- },
- "bugs": {
- "email": "raynos2@gmail.com",
- "url": "https://github.com/Raynos/xtend/issues"
- },
- "contributors": [
- {
- "name": "Jake Verbaten"
- },
- {
- "name": "Matt Esch"
- }
- ],
- "dependencies": {},
- "description": "extend like a boss",
- "devDependencies": {
- "tape": "~1.1.0"
- },
- "directories": {},
- "dist": {
- "fileCount": 7,
- "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
- "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJdI0ayCRA9TVsSAnZWagAAx5sP/0GFGMoeIKaOsNkW4VqT\npbchFQOM3JuA3y4h1pL0kHhRETKAN9aTU1eBv9VleXcCI5kbhtCQONZWCBGH\n4SV9SUFTE+Com3Yo1X1F00Fc8UEx1JtbSf82DLvjUy3dW4I7nGN6o001/lO1\nMjBB5fi4xrP/YPMuVbXhcB2WrgZ6X2VeSSAHnhkfxwfOAVq+shXzYvmRvFnN\nacFhuKRbQx24fQQPZrRY0FyCc797AJZhNJmrq3CRNbpkJ32TrKIjYibB6xm5\nE7HGTPKxF4j0WRlcRLOrhAIUXLV/kg7l5/YGCGjgzjhqGyoIId1Tn4tNinYb\naUcYalwfE8a4w6WdJp+rtNjftK2sjju5cqKbyg2UkelihMUlEhbeO+MfHgVa\nuXrrMnD55o/zMLnNfh31N2zEWAFbo5O7bdgD066zERKxMcvzOU6e/NgEYN3I\n0IJHNmYTb6sjFSC3+dsoLdyoHyda8wtnmuD2jiRLALMb22SdJG7n2DOJV6jc\n2nqTOn31vz9uB3WT5eLadpmNzYx7x/0cs5So2JGuBVMYYc+jmeqLo/6ZdG7w\nNi/hw8ZXh2RTsWPlXDcO99jQqzHg62J09h+Mci0GSVkl/fnHbx3Ho/jG7E5z\nwxbryfQHlY5haAOEATFROEQlXvAs3NIXCp1EG1fyqjyQU2MU1rZsPwycGF5Q\nff8r\r\n=1Too\r\n-----END PGP SIGNATURE-----\r\n",
- "shasum": "bb72779f5fa465186b1f438f674fa347fdb5db54",
- "tarball": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
- "unpackedSize": 6465
- },
- "engines": {
- "node": ">=0.4"
- },
- "gitHead": "37816c0e2e25da2901d584442235946d5cd8c80d",
- "homepage": "https://github.com/Raynos/xtend",
- "keywords": [
- "extend",
- "merge",
- "options",
- "opts",
- "object",
- "array"
- ],
- "license": "MIT",
- "main": "immutable",
- "maintainers": [
- {
- "email": "raynos2@gmail.com",
- "name": "raynos"
- }
- ],
- "name": "xtend",
- "optionalDependencies": {},
- "readme": "ERROR: No README data found!",
- "repository": {
- "type": "git",
- "url": "git://github.com/Raynos/xtend.git"
- },
- "scripts": {
- "test": "node test"
- },
- "testling": {
- "browsers": [
- "ie/7..latest",
- "firefox/16..latest",
- "firefox/nightly",
- "chrome/22..latest",
- "chrome/canary",
- "opera/12..latest",
- "opera/next",
- "safari/5.1..latest",
- "ipad/6.0..latest",
- "iphone/6.0..latest"
- ],
- "files": "test.js"
- },
- "version": "4.0.2"
-}
diff --git a/node_modules/xtend/test.js b/node_modules/xtend/test.js
deleted file mode 100644
index b895b42b3f76804de7ee2ef0231a3343b2f461b3..0000000000000000000000000000000000000000
--- a/node_modules/xtend/test.js
+++ /dev/null
@@ -1,103 +0,0 @@
-var test = require("tape")
-var extend = require("./")
-var mutableExtend = require("./mutable")
-
-test("merge", function(assert) {
- var a = { a: "foo" }
- var b = { b: "bar" }
-
- assert.deepEqual(extend(a, b), { a: "foo", b: "bar" })
- assert.end()
-})
-
-test("replace", function(assert) {
- var a = { a: "foo" }
- var b = { a: "bar" }
-
- assert.deepEqual(extend(a, b), { a: "bar" })
- assert.end()
-})
-
-test("undefined", function(assert) {
- var a = { a: undefined }
- var b = { b: "foo" }
-
- assert.deepEqual(extend(a, b), { a: undefined, b: "foo" })
- assert.deepEqual(extend(b, a), { a: undefined, b: "foo" })
- assert.end()
-})
-
-test("handle 0", function(assert) {
- var a = { a: "default" }
- var b = { a: 0 }
-
- assert.deepEqual(extend(a, b), { a: 0 })
- assert.deepEqual(extend(b, a), { a: "default" })
- assert.end()
-})
-
-test("is immutable", function (assert) {
- var record = {}
-
- extend(record, { foo: "bar" })
- assert.equal(record.foo, undefined)
- assert.end()
-})
-
-test("null as argument", function (assert) {
- var a = { foo: "bar" }
- var b = null
- var c = void 0
-
- assert.deepEqual(extend(b, a, c), { foo: "bar" })
- assert.end()
-})
-
-test("mutable", function (assert) {
- var a = { foo: "bar" }
-
- mutableExtend(a, { bar: "baz" })
-
- assert.equal(a.bar, "baz")
- assert.end()
-})
-
-test("null prototype", function(assert) {
- var a = { a: "foo" }
- var b = Object.create(null)
- b.b = "bar";
-
- assert.deepEqual(extend(a, b), { a: "foo", b: "bar" })
- assert.end()
-})
-
-test("null prototype mutable", function (assert) {
- var a = { foo: "bar" }
- var b = Object.create(null)
- b.bar = "baz";
-
- mutableExtend(a, b)
-
- assert.equal(a.bar, "baz")
- assert.end()
-})
-
-test("prototype pollution", function (assert) {
- var a = {}
- var maliciousPayload = '{"__proto__":{"oops":"It works!"}}'
-
- assert.strictEqual(a.oops, undefined)
- extend({}, maliciousPayload)
- assert.strictEqual(a.oops, undefined)
- assert.end()
-})
-
-test("prototype pollution mutable", function (assert) {
- var a = {}
- var maliciousPayload = '{"__proto__":{"oops":"It works!"}}'
-
- assert.strictEqual(a.oops, undefined)
- mutableExtend({}, maliciousPayload)
- assert.strictEqual(a.oops, undefined)
- assert.end()
-})