//<script type="text/javascript">
-//imports['Object.js'].load(Object);
-XObject = imports.XObject.XObject;
-console = imports['console.js'].console;
+
+const XObject = imports.XObject.XObject;
+const console = imports.console.console;
-JSDOC = imports['JSDOC.js'].JSDOC;
-Token = imports['JSDOC/Token.js'].Token;
-Lang = imports['JSDOC/Lang.js'].Lang;
+
+const Token = imports.Token.Token;
+const Lang = imports.Lang.Lang;
/**
@class Search a {@link JSDOC.TextStream} for language tokens.
*/
-TokenReader = XObject.define(
+const TokenReader = XObject.define(
function(o) {
- this.keepDocs = true;
- this.keepWhite = false;
- this.keepComments = false;
- this.sepIdents = false; // seperate '.' in identifiers..
XObject.extend(this, o || {});
},
Object,
{
-
-
+ /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
+ collapseWhite : false, // only reduces white space...
+ /** @cfg {Boolean} keepDocs keep JSDOC comments **/
+ keepDocs : true,
+ /** @cfg {Boolean} keepWhite keep White space **/
+ keepWhite : false,
+ /** @cfg {Boolean} keepComments keep all comments **/
+ keepComments : false,
+ /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
+ sepIdents : false,
+ /** @cfg {String} filename name of file being parsed. **/
+ filename : '',
+ /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
+ ignoreBadGrammer : false,
/**
- @type {JSDOC.Token[]}
+ * tokenize a stream
+ * @return {Array} of tokens
+ *
+ * ts = new TextStream(File.read(str));
+ * tr = TokenReader({ keepComments : true, keepWhite : true });
+ * tr.tokenize(ts)
+ *
*/
-
-
tokenize : function(/**JSDOC.TextStream*/stream) {
+ this.line =1;
var tokens = [];
- /**@ignore*/ tokens.last = function() { return tokens[tokens.length-1]; }
- /**@ignore*/ tokens.lastSym = function() {
+ /**@ignore*/
+ tokens.last = function() { return tokens[tokens.length-1]; }
+ /**@ignore*/
+ tokens.lastSym = function() {
for (var i = tokens.length-1; i >= 0; i--) {
if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
}
+ return true;
}
while (!stream.look().eof) {
if (this.read_word(stream, tokens)) continue;
// if execution reaches here then an error has happened
- tokens.push(new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN"));
+ tokens.push(new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line));
}
return tokens;
},
+ /**
+ * findPuncToken - find the id of a token (previous to current)
+ * need to back check syntax..
+ *
+ * @arg {Array} tokens the array of tokens.
+ * @arg {String} data the token data to find (eg. '(')
+ * @arg {Number} n offset where to start reading from
+ * @return {Number} position of token
+ */
+ findPuncToken : function(tokens, data, n) {
+ n = n || tokens.length -1;
+ var stack = 0;
+ while (n > -1) {
+
+ if (!stack && tokens[n].data == data) {
+ return n;
+ }
+
+ if (tokens[n].data == ')' || tokens[n].data == '}') {
+ stack++;
+ n--;
+ continue;
+ }
+ if (stack && (tokens[n].data == '{' || tokens[n].data == '(')) {
+ stack--;
+ n--;
+ continue;
+ }
+
+
+ n--;
+ }
+ return -1;
+ },
+ /**
+ * lastSym - find the last token symbol
+ * need to back check syntax..
+ *
+ * @arg {Array} tokens the array of tokens.
+ * @arg {Number} n offset where to start reading back from
+ * @return {Token} the token
+ */
+ lastSym : function(tokens, n) {
+ for (var i = n-1; i >= 0; i--) {
+ if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
+ }
+ return null;
+ },
+
+
+
/**
@returns {Boolean} Was the token found?
*/
if (found === "") {
return false;
}
- else {
- var name;
- if ((name = Lang.keyword(found))) tokens.push(new Token(found, "KEYW", name));
- else tokens.push(new Token(found, "NAME", "NAME"));
+
+ var name;
+ if ((name = Lang.keyword(found))) {
+ if (found == 'return' && tokens.lastSym().data == ')') {
+ //Seed.print('@' + tokens.length);
+ var n = this.findPuncToken(tokens, ')');
+ //Seed.print(')@' + n);
+ n = this.findPuncToken(tokens, '(', n-1);
+ //Seed.print('(@' + n);
+
+ var lt = this.lastSym(tokens, n);
+ print(JSON.stringify(lt));
+ if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
+ if (!this.ignoreBadGrammer) {
+ throw {
+ name : "ArgumentError",
+ message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
+ }
+ }
+ }
+
+
+
+ }
+
+ tokens.push(new Token(found, "KEYW", name, this.line));
+ return true;
+ }
+ if (!this.sepIdents || found.indexOf('.') < 0 ) {
+ tokens.push(new Token(found, "NAME", "NAME", this.line));
return true;
}
+ var n = found.split('.');
+ var p = false;
+ var _this = this;
+ n.forEach(function(nm) {
+ if (p) {
+ tokens.push(new Token('.', "PUNC", "DOT", _this.line));
+ }
+ p=true;
+ tokens.push(new Token(nm, "NAME", "NAME", _this.line));
+ });
+ return true;
+
+
},
/**
found += stream.next();
}
+
if (found === "") {
return false;
}
- else {
- tokens.push(new Token(found, "PUNC", Lang.punc(found)));
- return true;
+
+ if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
+ //print("Error - comma found before " + found);
+ //print(JSON.stringify(tokens.lastSym(), null,4));
+ if (this.ignoreBadGrammer) {
+ print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
+ } else {
+
+ throw {
+ name : "ArgumentError",
+ message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
+ }
+ }
}
+
+ tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
+ return true;
+
},
/**
read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
var found = "";
- while (!stream.look().eof && Lang.isSpace(stream.look())) {
+ while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
found += stream.next();
}
if (found === "") {
return false;
}
- else {
- if (this.collapseWhite) found = " ";
- if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE"));
- return true;
- }
+ //print("WHITE = " + JSON.stringify(found));
+ if (this.collapseWhite) found = " ";
+ if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+ return true;
+
},
/**
*/
read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
var found = "";
-
+ var line = this.line;
while (!stream.look().eof && Lang.isNewline(stream.look())) {
+ this.line++;
found += stream.next();
}
if (found === "") {
return false;
}
- else {
- if (this.collapseWhite) found = "\n";
- if (this.keepWhite) tokens.push(new Token(found, "WHIT", "NEWLINE"));
- return true;
+ //this.line++;
+ if (this.collapseWhite) {
+ found = "\n";
+ }
+ if (this.keepWhite) {
+ console.log('got here?');
+ var last = tokens.pop();
+ if (last && last.name != "WHIT") {
+ tokens.push(last);
+ }
+
+ tokens.push(new Token(found, "WHIT", "NEWLINE", line));
}
+ return true;
},
/**
read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
if (stream.look() == "/" && stream.look(1) == "*") {
var found = stream.next(2);
-
+ var c = '';
+ var line = this.line;
while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
- found += stream.next();
+ c = stream.next();
+ if (c == "\n") this.line++;
+ found += c;
}
// to start doclet we allow /** or /*** but not /**/ or /****
- if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC"));
- else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM"));
+ if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
+ else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
return true;
}
return false;
||
(stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
) {
-
+ var line = this.line;
while (!stream.look().eof && !Lang.isNewline(stream.look())) {
found += stream.next();
}
-
+ if (!stream.look().eof) {
+ found += stream.next();
+ }
if (this.keepComments) {
- tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM"));
+ tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
}
+ this.line++;
return true;
}
return false;
}
else if (stream.look() == "\"") {
string += stream.next();
- tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE"));
+ tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
return true;
}
else {
}
else if (stream.look() == "'") {
string += stream.next();
- tokens.push(new Token(string, "STRN", "SINGLE_QUOTE"));
+ tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
return true;
}
else {
return false;
}
else {
- if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL"));
- else tokens.push(new Token(found, "NUMB", "DECIMAL"));
+ if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
+ else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
return true;
}
},
while (!stream.look().eof) {
if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
- tokens.push(new Token(found, "NUMB", "HEX_DEC"));
+ tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
return true;
}
else {
regex += stream.next();
}
- tokens.push(new Token(regex, "REGX", "REGX"));
+ tokens.push(new Token(regex, "REGX", "REGX", this.line));
return true;
}
else {