1 //<script type="text/javascript">
6 //const Token = imports.Token.Token;
7 //const Lang = imports.Lang.Lang;
10 @class Search a {@link JSDOC.TextStream} for language tokens.
/**
 * TokenArray: a thin wrapper around a Gee.ArrayList of Token objects
 * providing the helpers the tokenizer needs (last, lastSym, push, get).
 */
public class TokenArray: Object {

    public Gee.ArrayList<Token> tokens;

    // Number of tokens collected so far.
    public int length {
        get { return this.tokens.size; }
    }

    public TokenArray()
    {
        // FIX: original assigned to "this.items", but the field is "tokens".
        this.tokens = new Gee.ArrayList<Token>();
    }

    /**
     * @return the most recently pushed token, or null when empty.
     */
    public Token? last() {
        // FIX: original compared the list object itself ("this.tokens > 0")
        // and used JS-style ".length"; Gee.ArrayList exposes ".size".
        if (this.tokens.size > 0) {
            return this.tokens[this.tokens.size - 1];
        }
        return null;
    }

    /**
     * @return the last token that is not whitespace ("WHIT") or a
     *         comment ("COMM"), or null if none exists.
     */
    public Token? lastSym () {
        for (var i = this.tokens.size - 1; i >= 0; i--) {
            if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM"))) {
                return this.tokens.get(i);
            }
        }
        return null;
    }

    /**
     * Append a token to the list.
     */
    public void push (Token t) {
        this.tokens.add(t);
    }

    /**
     * @return the token at index i.
     */
    public Token get(int i) {
        return this.tokens.get(i);
    }
}
50 public class TokenReader : Object
57 * I wonder if this will accept the prop: value, prop2 :value constructor if we do not define one...
// Configuration flags for the reader.  FIX: the original declarations were
// terminated with commas (a leftover from the JS object-literal form), which
// is invalid Vala — each field is now a separate ';'-terminated declaration.
/** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
public bool collapseWhite = false; // only reduces white space...
/** @cfg {Boolean} keepDocs keep JSDOC comments **/
public bool keepDocs = true;
/** @cfg {Boolean} keepWhite keep White space **/
public bool keepWhite = false;
/** @cfg {Boolean} keepComments keep all comments **/
public bool keepComments = false;
/** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
public bool sepIdents = false;
/** @cfg {String} filename name of file being parsed. **/
public string filename = "";
/** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
public bool ignoreBadGrammer = false;
// tokenize(): main entry point.  Walks the TextStream until EOF, trying each
// specialised reader in priority order (comments and strings before regex,
// numbers, punctuation, whitespace and words).  Each read_* either consumes
// characters and appends to `tokens` (returning true) or leaves the stream
// untouched.  Anything nothing recognises is recorded as an UNKNOWN_TOKEN.
// NOTE(review): interior lines are missing from this extract; loop structure
// and the final return are not visible here.
80 * @return {Array} of tokens
82 * ts = new TextStream(File.read(str));
83 * tr = TokenReader({ keepComments : true, keepWhite : true });
87 public TokenArray tokenize(TextStream stream)
90 var tokens = new TokenArray();
// `eof` is set by the stream via an out parameter on look().
95 stream.look(0, out eof)
99 if (this.read_mlcomment(stream, tokens)) continue;
100 if (this.read_slcomment(stream, tokens)) continue;
101 if (this.read_dbquote(stream, tokens)) continue;
102 if (this.read_snquote(stream, tokens)) continue;
103 if (this.read_regx(stream, tokens)) continue;
104 if (this.read_numb(stream, tokens)) continue;
105 if (this.read_punc(stream, tokens)) continue;
106 if (this.read_newline(stream, tokens)) continue;
107 if (this.read_space(stream, tokens)) continue;
108 if (this.read_word(stream, tokens)) continue;
110 // if execution reaches here then an error has happened
112 new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
// findPuncToken(): scan backwards from index n looking for the token whose
// data matches `data`, tracking a bracket-nesting counter (`stack`) so that
// balanced ')' / '}' ... '(' / '{' pairs are skipped over.
122 * findPuncToken - find the id of a token (previous to current)
123 * need to back check syntax..
125 * @arg {Array} tokens the array of tokens.
126 * @arg {String} token data (eg. '(')
127 * @arg {Number} offset where to start reading from
128 * @return {Number} position of token
130 public int findPuncToken(TokenArray tokens, string data, int n) {
// NOTE(review): "n || ..." is a JavaScript default-value idiom and is not
// valid Vala for an int parameter — should probably be
// "n = n > 0 ? n : tokens.length - 1;".  Confirm against the full file.
131 n = n || tokens.length -1;
135 if (!stack && tokens.get(n).data == data) {
// NOTE(review): ')' / '}' are char literals compared against a (presumably
// string) .data field — likely needs double-quoted strings in Vala.
139 if (tokens.get(n).data == ')' || tokens.get(n).data == '}') {
144 if (stack && (tokens.get(n).data == '{' || tokens.get(n).data == '(')) {
// lastSym(): like TokenArray.lastSym() but starting from an explicit offset n
// (exclusive) — returns the nearest preceding token that is neither
// whitespace nor a comment.  The not-found return is outside this extract.
156 * lastSym - find the last token symbol
157 * need to back check syntax..
159 * @arg {Array} tokens the array of tokens.
160 * @arg {Number} offset where to start..
161 * @return {Token} the token
163 public Token lastSym(TokenArray tokens, int n) {
164 for (var i = n-1; i >= 0; i--) {
165 if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
166 return tokens.get(i);
// read_word(): consume a run of word characters.  Keywords become KEYW
// tokens; identifiers become NAME tokens, optionally split on '.' into
// NAME/DOT sequences when sepIdents is set.  Also performs a grammar check:
// "return" directly after ')' is only legal after an if/while condition.
175 @returns {Boolean} Was the token found?
177 read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
179 while (!stream.look().eof && Lang.isWordChar(stream.look())) {
180 found += stream.next();
188 if ((name = Lang.keyword(found))) {
189 if (found == 'return' && tokens.lastSym().data == ')') {
190 //Seed.print('@' + tokens.length);
// Walk back to the '(' matching the ')' that precedes "return" …
191 var n = this.findPuncToken(tokens, ')');
192 //Seed.print(')@' + n);
193 n = this.findPuncToken(tokens, '(', n-1);
194 //Seed.print('(@' + n);
// … then inspect the symbol before that '(' to see if it is if/while.
196 var lt = this.lastSym(tokens, n);
// NOTE(review): leftover debug output — should be removed or guarded.
197 print(JSON.stringify(lt));
// BUG(review): "indexOf(lt.name) < -1" is always false (indexOf returns
// -1 at minimum), so the IF/WHILE exemption never triggers via this half
// of the ||.  Almost certainly intended "< 0".
198 if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
199 if (!this.ignoreBadGrammer) {
201 name : "ArgumentError",
202 message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
211 tokens.push(new Token(found, "KEYW", name, this.line));
// Not a keyword: plain identifier, possibly dotted.
214 if (!this.sepIdents || found.indexOf('.') < 0 ) {
215 tokens.push(new Token(found, "NAME", "NAME", this.line));
218 var n = found.split('.');
221 n.forEach(function(nm) {
223 tokens.push(new Token('.', "PUNC", "DOT", _this.line));
226 tokens.push(new Token(nm, "NAME", "NAME", _this.line));
// read_punc(): greedily consume the longest run of characters that still
// forms a known punctuation sequence (Lang.punc).  Flags the bad-grammar
// case of a trailing comma immediately before '}' or ']' — either printed
// (ignoreBadGrammer) or raised as an ArgumentError.
234 @returns {Boolean} Was the token found?
236 read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
239 while (!stream.look().eof && Lang.punc(found+stream.look())) {
240 found += stream.next();
248 if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
249 //print("Error - comma found before " + found);
250 //print(JSON.stringify(tokens.lastSym(), null,4));
251 if (this.ignoreBadGrammer) {
252 print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
256 name : "ArgumentError",
257 message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
262 tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
// read_space(): consume non-newline whitespace.  Collapsed to a single " "
// when collapseWhite is set; only emitted at all when keepWhite is set.
268 @returns {Boolean} Was the token found?
270 read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
273 while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
274 found += stream.next();
280 //print("WHITE = " + JSON.stringify(found));
281 if (this.collapseWhite) found = " ";
282 if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
// read_newline(): consume a run of newline characters (bumping this.line).
// The NEWLINE token is stamped with the line number where the run started.
// When keepWhite is set, an immediately preceding WHIT token is popped and
// merged rather than emitting two adjacent whitespace tokens.
288 @returns {Boolean} Was the token found?
290 read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
292 var line = this.line;
293 while (!stream.look().eof && Lang.isNewline(stream.look())) {
295 found += stream.next();
302 if (this.collapseWhite) {
305 if (this.keepWhite) {
306 var last = tokens ? tokens.pop() : false;
307 if (last && last.name != "WHIT") {
311 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
// read_mlcomment(): consume a /* ... */ block comment, counting newlines.
// Doclets (/** but not /**/ or /****) become JSDOC tokens when keepDocs is
// set; other block comments become MULTI_LINE_COMM when keepComments is set.
317 @returns {Boolean} Was the token found?
319 read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
320 if (stream.look() == "/" && stream.look(1) == "*") {
321 var found = stream.next(2);
323 var line = this.line;
// look(-1)/look(-2) peek at the characters just consumed, so the loop
// stops once "*/" has been read.
324 while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
326 if (c == "\n") this.line++;
330 // to start doclet we allow /** or /*** but not /**/ or /****
331 if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
332 else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
// read_slcomment(): consume a single-line comment — either "//" or an HTML
// comment opener "<!--" — up to (and including) the terminating newline.
// Emitted as SINGLE_LINE_COMM only when keepComments is set.
339 @returns {Boolean} Was the token found?
341 read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
344 (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
346 (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
348 var line = this.line;
349 while (!stream.look().eof && !Lang.isNewline(stream.look())) {
350 found += stream.next();
// Swallow the newline terminator itself (unless we hit EOF).
352 if (!stream.look().eof) {
353 found += stream.next();
355 if (this.keepComments) {
356 tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
// read_dbquote(): consume a double-quoted string literal, honouring "\"
// escape pairs and line-continuation backslashes before a newline.
// Emits a STRN/DOUBLE_QUOTE token; returns false on an unterminated string.
365 @returns {Boolean} Was the token found?
367 read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
368 if (stream.look() == "\"") {
370 var string = stream.next();
372 while (!stream.look().eof) {
373 if (stream.look() == "\\") {
// Backslash-newline: a line continuation — skip the newline run.
374 if (Lang.isNewline(stream.look(1))) {
377 } while (!stream.look().eof && Lang.isNewline(stream.look()));
// Ordinary escape: take the backslash and the escaped char together.
381 string += stream.next(2);
384 else if (stream.look() == "\"") {
385 string += stream.next();
386 tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
390 string += stream.next();
394 return false; // error! unterminated string
// read_snquote(): consume a single-quoted string literal, treating "\x" as
// a two-character escape.  Emits a STRN/SINGLE_QUOTE token; returns false
// on an unterminated string.  (Simpler than read_dbquote: no
// line-continuation handling here.)
398 @returns {Boolean} Was the token found?
400 read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
401 if (stream.look() == "'") {
403 var string = stream.next();
405 while (!stream.look().eof) {
406 if (stream.look() == "\\") { // escape sequence
407 string += stream.next(2);
409 else if (stream.look() == "'") {
410 string += stream.next();
411 tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
415 string += stream.next();
419 return false; // error! unterminated string
// read_numb(): consume a numeric literal.  "0x…" is delegated to read_hex;
// otherwise the longest prefix that Lang.isNumber accepts is taken, and a
// leading 0 followed by an octal digit classifies it as OCTAL, else DECIMAL.
423 @returns {Boolean} Was the token found?
425 read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
426 if (stream.look() === "0" && stream.look(1) == "x") {
427 return this.read_hex(stream, tokens);
432 while (!stream.look().eof && Lang.isNumber(found+stream.look())){
433 found += stream.next();
440 if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
441 else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
// Inline self-test fragment (JSDoc-Toolkit style: requires()/plan()/is()).
// Tokenizes a snippet containing decimal, hex and octal literals and checks
// each is classified correctly by read_numb/read_hex.
446 requires("../lib/JSDOC/TextStream.js");
447 requires("../lib/JSDOC/Token.js");
448 requires("../lib/JSDOC/Lang.js");
450 plan(3, "testing read_numb");
453 var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
454 var tr = new TokenReader();
455 var tokens = tr.tokenize(new TextStream(src));
457 var hexToken, octToken, decToken;
458 for (var i = 0; i < tokens.length; i++) {
459 if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
460 if (tokens[i].name == "OCTAL") octToken = tokens[i];
461 if (tokens[i].name == "DECIMAL") decToken = tokens[i];
465 is(decToken.data, "8.0", "decimal number is found in source.");
466 is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
467 is(octToken.data, "0777", "octal number is found in source.");
// read_hex(): called by read_numb once "0x" has been seen — consumes the
// longest valid hex literal and emits a NUMB/HEX_DEC token.  Stops when
// adding the next character would no longer form a valid hex number.
471 @returns {Boolean} Was the token found?
473 read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
474 var found = stream.next(2);
476 while (!stream.look().eof) {
477 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
478 tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
482 found += stream.next();
// read_regx(): consume a /regex/gmi literal.  Disambiguates regex from the
// division operator by looking at the previous symbol: a regex can only
// start where an expression is expected (e.g. not after ')' or ']').
// Handles "\x" escapes and trailing g/m/i flags; falls through on an
// unterminated regex.
489 @returns {Boolean} Was the token found?
491 read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
499 !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
504 && !last.is("RIGHT_PAREN")
505 && !last.is("RIGHT_BRACKET")
510 var regex = stream.next();
512 while (!stream.look().eof) {
513 if (stream.look() == "\\") { // escape sequence
514 regex += stream.next(2);
516 else if (stream.look() == "/") {
517 regex += stream.next();
// Collect any regex flags that immediately follow the closing '/'.
519 while (/[gmi]/.test(stream.look())) {
520 regex += stream.next();
523 tokens.push(new Token(regex, "REGX", "REGX", this.line));
527 regex += stream.next();
530 // error: unterminated regex