Last active
August 29, 2015 14:10
-
-
Save mzgoddard/2515e4e1863f2c2ef89a to your computer and use it in GitHub Desktop.
Dummy source node map, with reduction over non-word tokens.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/**
 * Build a "dummy" SourceNode for `src`, mapping each token back to its
 * line/column position in the original file `name`. Tokens are produced by
 * splitting on newlines and word boundaries, then adjacent non-word tokens
 * are merged to reduce the number of emitted mappings.
 *
 * @param {string} src - Source text to tokenize and map.
 * @param {string} name - Original file name recorded in each mapping.
 * @returns {SourceNode} Root node containing one child per (merged) token.
 */
SourceMapConcatHelper.prototype._dummyNode = function(src, name) {
  var node = new SourceNode();
  var lineIndex = 1;
  var charIndex = 0;
  // Tokenize on words, new lines, and white space. The capture group keeps
  // the delimiters in the result array.
  var tokens = src.split(/(\n|\b)/g);
  // Filter out empty strings (split with a capture group yields them).
  tokens = tokens.filter(function(t) { return !!t; });
  // Reduce adjacent non-word tokens together so whitespace runs collapse
  // into single tokens where safe.
  tokens = tokens.reduce(function(dest, t, index, srcTokens) {
    var lastTokenIsNotWord =
      dest.length && /^\W+$/.test(dest[dest.length - 1]);
    var tokenIsNotWord = /^\W+$/.test(t);
    var nextTokenIsNotWord = /^\W+$/.test(srcTokens[index + 1]);
    var secondLastTokenIsNotWord =
      dest.length >= 2 && /^\W+$/.test(dest[dest.length - 2]);
    if (
      lastTokenIsNotWord && tokenIsNotWord && (
        t !== '\n' && nextTokenIsNotWord ||
        t === '\n' && secondLastTokenIsNotWord
      )
    ) {
      // Append onto the previous non-word token instead of emitting a new one.
      dest[dest.length - 1] += t;
    } else {
      dest.push(t);
    }
    return dest;
  }, []);
  tokens.forEach(function(token) {
    node.add(new SourceNode(lineIndex, charIndex, name, token));
    // A merged token may contain newlines anywhere inside it (e.g. "\n  " or
    // " \n\n "), not just as the final character. Advance the line counter by
    // every newline it contains and derive the new column from the characters
    // after the LAST newline; checking only the final character (as the
    // original did) silently corrupted positions for all subsequent tokens.
    var lastNewline = token.lastIndexOf('\n');
    if (lastNewline === -1) {
      charIndex += token.length;
    } else {
      lineIndex += token.split('\n').length - 1;
      charIndex = token.length - lastNewline - 1;
    }
  });
  return node;
};
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/**
 * Create a SourceNode that maps every meaningful token of `src` back to its
 * position in the original file `name`. Runs of non-newline whitespace still
 * advance the tracked position but are not emitted as mapped nodes.
 *
 * @param {string} src - Source text to tokenize and map.
 * @param {string} name - Original file name recorded in each mapping.
 * @returns {SourceNode} Root node containing one child per mapped token.
 */
SourceMapConcatHelper.prototype._dummyNode = function(src, name) {
  var root = new SourceNode();
  var line = 1;
  var column = 0;
  // Split into newlines, runs of non-newline whitespace, and words. The
  // capture group keeps the delimiters; drop the empty strings it yields.
  var pieces = src.split(/(\n|[^\S\n]+|\b)/g).filter(function(piece) {
    return piece.length > 0;
  });
  for (var i = 0; i < pieces.length; i++) {
    var piece = pieces[i];
    // Map the piece unless it is purely non-newline whitespace.
    if (/[\S\n]/.test(piece)) {
      root.add(new SourceNode(line, column, name, piece));
    }
    // Advance the tracked position whether or not the piece was mapped.
    if (piece.charAt(piece.length - 1) === '\n') {
      line++;
      column = 0;
    } else {
      column += piece.length;
    }
  }
  return root;
};
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment