1 //<script type="text/javascript">
6 //const Token = imports.Token.Token;
7 //const Lang = imports.Lang.Lang;
10 @class Search a {@link JSDOC.TextStream} for language tokens.
// NOTE(review): this chunk is an elided listing — the embedded numbers
// (15, 17, 21, ...) are stale source line numbers and the gaps between
// them are lines not visible here. Comments describe only the visible
// fragments; confirm against the full file.
// TokenArray: a thin wrapper around a Gee.ArrayList<Token> with
// convenience accessors for the most recent (non-trivia) token.
15 public class TokenArray: Object {
17     Gee.ArrayList<Token> tokens;
// NOTE(review): constructor assigns `this.items` but the declared field is
// `tokens` — one of the two names is wrong; verify against the full file.
21         this.items = new Gee.ArrayList<Token>();
// last(): returns the most recently appended token (null on empty list,
// presumably — the else-branch is not visible here).
24     public Token? last() {
// NOTE(review): compares the list object itself to 0 — almost certainly
// meant `this.tokens.size > 0` (Gee.ArrayList exposes `size`, not `length`;
// `.length` below looks like a leftover from the JavaScript original).
25         if (this.tokens > 0) {
26             return this.tokens[this.tokens.length-1];
// lastSym(): walk backwards and return the last token that is neither
// whitespace ("WHIT") nor a comment ("COMM").
// NOTE(review): `= function() {` is JavaScript syntax left over from the
// port — not valid inside a Vala class body; intended form is likely
// `public Token? lastSym() {`.
30     public Token? lastSym = function() {
31         for (var i = this.tokens.length-1; i >= 0; i--) {
32             if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM"))) {
33                 return this.tokens.get(i);
// TokenReader: configurable tokenizer driver; the flags below control
// which token categories are kept in the output stream.
41 public class TokenReader : Object
48  * I wonder if this will accept the prop: value, prop2 :value constructor if we do not define one...
// NOTE(review): the fields below are comma-terminated (`= false,`) — valid
// in the JavaScript object literal this was ported from, but not in a Vala
// class body, where each field needs its own `;` (as `filename` has).
51 /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
52 public bool collapseWhite = false, // only reduces white space...
53 /** @cfg {Boolean} keepDocs keep JSDOC comments **/
54 public bool keepDocs = true,
55 /** @cfg {Boolean} keepWhite keep White space **/
56 public bool keepWhite = false,
57 /** @cfg {Boolean} keepComments keep all comments **/
58 public bool keepComments = false,
59 /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
60 public bool sepIdents = false,
61 /** @cfg {String} filename name of file being parsed. **/
62 public string filename = "";
63 /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
64 public bool ignoreBadGrammer = false,
78 public TokenArray tokenize(TextStream stream)
81 var tokens = new TokenArray();
86 stream.look(0, out eof)
90 if (this.read_mlcomment(stream, tokens)) continue;
91 if (this.read_slcomment(stream, tokens)) continue;
92 if (this.read_dbquote(stream, tokens)) continue;
93 if (this.read_snquote(stream, tokens)) continue;
94 if (this.read_regx(stream, tokens)) continue;
95 if (this.read_numb(stream, tokens)) continue;
96 if (this.read_punc(stream, tokens)) continue;
97 if (this.read_newline(stream, tokens)) continue;
98 if (this.read_space(stream, tokens)) continue;
99 if (this.read_word(stream, tokens)) continue;
101 // if execution reaches here then an error has happened
102 tokens.push(new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line));
111  * findPuncToken - find the id of a token (previous to current)
112  * need to back check syntax..
114  * @arg {Array} tokens the array of tokens.
115  * @arg {String} token data (eg. '(')
116  * @arg {Number} offset where to start reading from
117  * @return {Number} position of token
// Scans backwards from position n for a matching punctuation token,
// using a bracket-nesting counter (`stack`, declared in an elided line)
// so that nested (..) / {..} pairs are skipped over.
// NOTE(review): still JavaScript member syntax — not yet ported to Vala.
119     findPuncToken : function(tokens, data, n) {
// JS truthiness default: falls back to the last index when n is 0 or
// undefined (0 is treated as "not supplied" here).
120         n = n || tokens.length -1;
121         
124             if (!stack && tokens[n].data == data) {
// Closing bracket while scanning backwards => entering a nested pair.
128             if (tokens[n].data == ')' || tokens[n].data == '}') {
// Opening bracket closes one level of nesting.
133             if (stack && (tokens[n].data == '{' || tokens[n].data == '(')) {
145  * lastSym - find the last token symbol
146  * need to back check syntax..
148  * @arg {Array} tokens the array of tokens.
149  * @arg {Number} offset where to start..
150  * @return {Token} the token
// Reader-level variant of TokenArray.lastSym: last non-whitespace,
// non-comment token strictly before index n.
152     lastSym : function(tokens, n) {
153         for (var i = n-1; i >= 0; i--) {
154             if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
162         @returns {Boolean} Was the token found?
// read_word(): consume a run of word characters, then classify it as a
// keyword (KEYW) or identifier (NAME). Also performs a grammar sanity
// check for `return` appearing directly after `)`.
164     read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
166         while (!stream.look().eof && Lang.isWordChar(stream.look())) {
167             found += stream.next();
// Keyword path: Lang.keyword() maps the text to a canonical keyword name.
175         if ((name = Lang.keyword(found))) {
// `return` immediately after ')' is only legal when that ')' closed an
// if/while condition — back-track to the matching '(' and inspect the
// symbol before it.
176             if (found == 'return' && tokens.lastSym().data == ')') {
177                 //Seed.print('@' + tokens.length);
178                 var n = this.findPuncToken(tokens, ')');
179                 //Seed.print(')@' + n);
180                 n = this.findPuncToken(tokens, '(', n-1);
181                 //Seed.print('(@' + n);
183                 var lt = this.lastSym(tokens, n);
184                 print(JSON.stringify(lt));
// NOTE(review): `indexOf(lt.name) < -1` can never be true (indexOf
// returns -1 or a non-negative index), so the keyword-name check is a
// no-op — almost certainly meant `< 0` (i.e. name not in the list).
185                 if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
186                     if (!this.ignoreBadGrammer) {
188                             name : "ArgumentError",
189                             message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
198             tokens.push(new Token(found, "KEYW", name, this.line));
// Identifier path: either keep dotted names whole, or (sepIdents) split
// a.b.c into NAME / DOT / NAME ... tokens.
201         if (!this.sepIdents || found.indexOf('.') < 0 ) {
202             tokens.push(new Token(found, "NAME", "NAME", this.line));
205         var n = found.split('.');
208         n.forEach(function(nm) {
// DOT separators between the parts (first-iteration guard is in an
// elided line, presumably).
210                 tokens.push(new Token('.', "PUNC", "DOT", _this.line));
213             tokens.push(new Token(nm, "NAME", "NAME", _this.line));
221         @returns {Boolean} Was the token found?
// read_punc(): greedily consume the longest run that still forms a known
// punctuation/operator (Lang.punc), e.g. '=' then '==' then '==='.
223     read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
226         while (!stream.look().eof && Lang.punc(found+stream.look())) {
227             found += stream.next();
// Grammar check: a trailing comma directly before '}' or ']' breaks some
// JS engines / compressors — warn or throw depending on ignoreBadGrammer.
235         if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
236             //print("Error - comma found before " + found);
237             //print(JSON.stringify(tokens.lastSym(), null,4));
238             if (this.ignoreBadGrammer) {
239                 print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
243                     name : "ArgumentError",
244                     message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
249         tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
255         @returns {Boolean} Was the token found?
// read_space(): consume horizontal whitespace (explicitly excluding
// newlines, which read_newline handles so line counting stays correct).
257     read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
260         while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
261             found += stream.next();
267         //print("WHITE = " + JSON.stringify(found));
// collapseWhite squashes the run to a single space; keepWhite decides
// whether the token is emitted at all.
268         if (this.collapseWhite) found = " ";
269         if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
275         @returns {Boolean} Was the token found?
// read_newline(): consume one or more newline characters, tracking the
// current line number (increment happens in an elided line of the loop).
277     read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
// Remember the line the newline run started on for the emitted token.
279         var line = this.line;
280         while (!stream.look().eof && Lang.isNewline(stream.look())) {
282             found += stream.next();
289         if (this.collapseWhite) {
292         if (this.keepWhite) {
// Pop the previous token so adjacent whitespace can be merged; the
// push-back of a popped non-WHIT token is presumably in an elided line.
293             var last = tokens ? tokens.pop() : false;
294             if (last && last.name != "WHIT") {
298             tokens.push(new Token(found, "WHIT", "NEWLINE", line));
304         @returns {Boolean} Was the token found?
// read_mlcomment(): consume a /* ... */ block comment, distinguishing
// JSDoc doclets (/** ... */) from plain multi-line comments.
306     read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
307         if (stream.look() == "/" && stream.look(1) == "*") {
308             var found = stream.next(2);
310             var line = this.line;
// Negative look() offsets inspect already-consumed characters: stop once
// the last two characters read were "*/" — presumably; confirm look()'s
// negative-offset semantics against TextStream.
311             while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
313                 if (c == "\n") this.line++;
317             // to start doclet we allow /** or /*** but not /**/ or /****
318             if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
319             else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
326         @returns {Boolean} Was the token found?
// read_slcomment(): consume a single-line comment. Two openers are
// recognised: `//` and the HTML comment opener `<!--` (legacy JS allows
// it as a line comment).
328     read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
331             (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
333             (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
335             var line = this.line;
// Everything up to (but not including) the newline belongs to the comment.
336             while (!stream.look().eof && !Lang.isNewline(stream.look())) {
337                 found += stream.next();
// Consume the terminating newline as part of the comment token.
339             if (!stream.look().eof) {
340                 found += stream.next();
342             if (this.keepComments) {
343                 tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
352         @returns {Boolean} Was the token found?
// read_dbquote(): consume a double-quoted string literal, handling
// escape sequences and backslash-newline line continuations.
354     read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
355         if (stream.look() == "\"") {
357             var string = stream.next();
359             while (!stream.look().eof) {
360                 if (stream.look() == "\\") {
// Backslash before a newline: line continuation — skip the newline(s)
// (the do-loop header for the fragment below is in an elided line).
361                     if (Lang.isNewline(stream.look(1))) {
364                         } while (!stream.look().eof && Lang.isNewline(stream.look()));
// Ordinary escape: consume backslash plus the escaped character as-is.
368                         string += stream.next(2);
// Closing quote terminates the literal; emit it with the quotes included.
371                 else if (stream.look() == "\"") {
372                     string += stream.next();
373                     tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
377                 string += stream.next();
381         return false; // error! unterminated string
385         @returns {Boolean} Was the token found?
// read_snquote(): consume a single-quoted string literal. Simpler than
// read_dbquote — escapes are taken verbatim, no line-continuation case.
387     read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
388         if (stream.look() == "'") {
390             var string = stream.next();
392             while (!stream.look().eof) {
393                 if (stream.look() == "\\") { // escape sequence
394                     string += stream.next(2);
// Closing quote terminates the literal; emit it with the quotes included.
396                 else if (stream.look() == "'") {
397                     string += stream.next();
398                     tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
402                 string += stream.next();
406         return false; // error! unterminated string
410         @returns {Boolean} Was the token found?
// read_numb(): consume a numeric literal. "0x..." is delegated to
// read_hex; otherwise the longest prefix Lang.isNumber accepts is taken.
412     read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
413         if (stream.look() === "0" && stream.look(1) == "x") {
414             return this.read_hex(stream, tokens);
419         while (!stream.look().eof && Lang.isNumber(found+stream.look())){
420             found += stream.next();
// Leading 0 followed by an octal digit => legacy octal literal.
427         if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
428         else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
// NOTE(review): the block below is an embedded self-test / example
// (requires/plan/is look like a tap-style harness), not part of the
// TokenReader implementation — presumably it lives inside a doc comment
// or test section in the full file; verify before treating it as code.
433 requires("../lib/JSDOC/TextStream.js");
434 requires("../lib/JSDOC/Token.js");
435 requires("../lib/JSDOC/Lang.js");
437 plan(3, "testing read_numb");
440 var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
441 var tr = new TokenReader();
442 var tokens = tr.tokenize(new TextStream(src));
444 var hexToken, octToken, decToken;
445 for (var i = 0; i < tokens.length; i++) {
446     if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
447     if (tokens[i].name == "OCTAL") octToken = tokens[i];
448     if (tokens[i].name == "DECIMAL") decToken = tokens[i];
452 is(decToken.data, "8.0", "decimal number is found in source.");
453 is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
454 is(octToken.data, "0777", "octal number is found in source.");
458         @returns {Boolean} Was the token found?
// read_hex(): consume a hexadecimal literal; caller (read_numb) has
// already verified the "0x" prefix, which next(2) consumes here.
460     read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
461         var found = stream.next(2);
463         while (!stream.look().eof) {
// Stop when the accumulated text is a valid hex literal but appending
// the next character would make it invalid.
464             if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
465                 tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
469             found += stream.next();
476         @returns {Boolean} Was the token found?
// read_regx(): consume a /regex/flags literal. A leading '/' is only a
// regex (rather than division) when the previous symbol puts us in
// operand position — the visible conditions below implement that
// heuristic (additional conditions are in elided lines).
478     read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
486                 !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
491                     && !last.is("RIGHT_PAREN")
492                     && !last.is("RIGHT_BRACKET")
497             var regex = stream.next();
499             while (!stream.look().eof) {
500                 if (stream.look() == "\\") { // escape sequence
501                     regex += stream.next(2);
// Closing '/': consume it plus any trailing g/m/i flags, then emit.
503                 else if (stream.look() == "/") {
504                     regex += stream.next();
506                     while (/[gmi]/.test(stream.look())) {
507                         regex += stream.next();
510                     tokens.push(new Token(regex, "REGX", "REGX", this.line));
514                 regex += stream.next();
517             // error: unterminated regex