// JSDOC/TokenReader.vala
// [gnome.introspection-doc-generator] / JSDOC / TokenReader.vala
1 //<script type="text/javascript">
2
3  
4
5
6 //const Token   = imports.Token.Token;
7 //const Lang    = imports.Lang.Lang;
8
9 /**
10         @class Search a {@link JSDOC.TextStream} for language tokens.
11 */
12
13 namespace JSDOC {
14
15     public class TokenArray: Object {
16         
17         public Gee.ArrayList<Token> tokens;
18         public int length {
19             get { return this.tokens.size }
20         }
21         
22         public TokenArray()
23         {
24             this.items = new Gee.ArrayList<Token>();
25         }
26         
27         public Token? last() {
28             if (this.tokens > 0) {
29                 return this.tokens[this.tokens.length-1];
30             }
31             return null;
32         }
33         public Token? lastSym () {
34             for (var i = this.tokens.length-1; i >= 0; i--) {
35                 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
36                     return this.tokens.get(i);
37                 }
38             }
39             return null;
40         }
41         public void push (Token t) {
42             this.tokens.add(t);
43         }
44     }
45
46
47     public class TokenReader : Object
48     {
49         
50         
51         
52         /*
53          *
54          * I wonder if this will accept the prop: value, prop2 :value construxtor if we do not define one...
55          */
56         
57         /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
58         public bool collapseWhite = false, // only reduces white space...
59         /** @cfg {Boolean} keepDocs keep JSDOC comments **/
60         public bool keepDocs = true,
61         /** @cfg {Boolean} keepWhite keep White space **/
62         public bool keepWhite = false,
63         /** @cfg {Boolean} keepComments  keep all comments **/
64         public bool keepComments = false,
65         /** @cfg {Boolean} sepIdents seperate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
66         public bool sepIdents = false,
67         /** @cfg {String} filename name of file being parsed. **/
68         public string filename = "";
69         /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
70         public bool ignoreBadGrammer = false,
71         
72         
73         int line = 0;
74         
75         /**
76          * tokenize a stream
77          * @return {Array} of tokens
78          * 
79          * ts = new TextStream(File.read(str));
80          * tr = TokenReader({ keepComments : true, keepWhite : true });
81          * tr.tokenize(ts)
82          * 
83          */
84         public TokenArray tokenize(TextStream stream)
85         {
86             this.line =1;
87             var tokens = new TokenArray();
88            
89             bool eof;
90             while (true) {
91                 
92                 stream.look(0, out eof) 
93                 if (eof) {
94                     break;
95                 }
96                 if (this.read_mlcomment(stream, tokens)) continue;
97                 if (this.read_slcomment(stream, tokens)) continue;
98                 if (this.read_dbquote(stream, tokens))   continue;
99                 if (this.read_snquote(stream, tokens))   continue;
100                 if (this.read_regx(stream, tokens))      continue;
101                 if (this.read_numb(stream, tokens))      continue;
102                 if (this.read_punc(stream, tokens))      continue;
103                 if (this.read_newline(stream, tokens))   continue;
104                 if (this.read_space(stream, tokens))     continue;
105                 if (this.read_word(stream, tokens))      continue;
106                 
107                 // if execution reaches here then an error has happened
108                 tokens.push(
109                         new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
110                 );
111             }
112             
113             
114             
115             return tokens;
116         },
117
118         /**
119          * findPuncToken - find the id of a token (previous to current)
120          * need to back check syntax..
121          * 
122          * @arg {Array} tokens the array of tokens.
123          * @arg {String} token data (eg. '(')
124          * @arg {Number} offset where to start reading from
125          * @return {Number} position of token
126          */
127         public int findPuncToken(TokenArray tokens, string data, int n) {
128             n = n || tokens.length -1;
129             var stack = 0;
130             while (n > -1) {
131                 
132                 if (!stack && tokens[n].data == data) {
133                     return n;
134                 }
135                 
136                 if (tokens[n].data  == ')' || tokens[n].data  == '}') {
137                     stack++;
138                     n--;
139                     continue;
140                 }
141                 if (stack && (tokens[n].data  == '{' || tokens[n].data  == '(')) {
142                     stack--;
143                     n--;
144                     continue;
145                 }
146                 
147                 
148                 n--;
149             }
150             return -1;
151         },
152         /**
153          * lastSym - find the last token symbol
154          * need to back check syntax..
155          * 
156          * @arg {Array} tokens the array of tokens.
157          * @arg {Number} offset where to start..
158          * @return {Token} the token
159          */
160         lastSym : function(tokens, n) {
161             for (var i = n-1; i >= 0; i--) {
162                 if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
163             }
164             return null;
165         },
166         
167          
168         
169         /**
170             @returns {Boolean} Was the token found?
171          */
172         read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
173             var found = "";
174             while (!stream.look().eof && Lang.isWordChar(stream.look())) {
175                 found += stream.next();
176             }
177             
178             if (found === "") {
179                 return false;
180             }
181             
182             var name;
183             if ((name = Lang.keyword(found))) {
184                 if (found == 'return' && tokens.lastSym().data == ')') {
185                     //Seed.print('@' + tokens.length);
186                     var n = this.findPuncToken(tokens, ')');
187                     //Seed.print(')@' + n);
188                     n = this.findPuncToken(tokens, '(', n-1);
189                     //Seed.print('(@' + n);
190                     
191                     var lt = this.lastSym(tokens, n);
192                     print(JSON.stringify(lt));
193                     if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
194                         if (!this.ignoreBadGrammer) {
195                             throw {
196                                 name : "ArgumentError", 
197                                 message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
198                             }
199                         }
200                     }
201                     
202                     
203                     
204                 }
205                 
206                 tokens.push(new Token(found, "KEYW", name, this.line));
207                 return true;
208             }
209             if (!this.sepIdents || found.indexOf('.') < 0 ) {
210                 tokens.push(new Token(found, "NAME", "NAME", this.line));
211                 return true;
212             }
213             var n = found.split('.');
214             var p = false;
215             var _this = this;
216             n.forEach(function(nm) {
217                 if (p) {
218                     tokens.push(new Token('.', "PUNC", "DOT", _this.line));
219                 }
220                 p=true;
221                 tokens.push(new Token(nm, "NAME", "NAME", _this.line));
222             });
223             return true;
224                 
225
226         },
227
228         /**
229             @returns {Boolean} Was the token found?
230          */
231         read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
232             var found = "";
233             var name;
234             while (!stream.look().eof && Lang.punc(found+stream.look())) {
235                 found += stream.next();
236             }
237             
238             
239             if (found === "") {
240                 return false;
241             }
242             
243             if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
244                 //print("Error - comma found before " + found);
245                 //print(JSON.stringify(tokens.lastSym(), null,4));
246                 if (this.ignoreBadGrammer) {
247                     print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
248                 } else {
249                     
250                     throw {
251                         name : "ArgumentError", 
252                         message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
253                     }
254                 }
255             }
256             
257             tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
258             return true;
259             
260         },
261
262         /**
263             @returns {Boolean} Was the token found?
264          */
265         read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
266             var found = "";
267             
268             while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
269                 found += stream.next();
270             }
271             
272             if (found === "") {
273                 return false;
274             }
275             //print("WHITE = " + JSON.stringify(found)); 
276             if (this.collapseWhite) found = " ";
277             if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
278             return true;
279         
280         },
281
282         /**
283             @returns {Boolean} Was the token found?
284          */
285         read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
286             var found = "";
287             var line = this.line;
288             while (!stream.look().eof && Lang.isNewline(stream.look())) {
289                 this.line++;
290                 found += stream.next();
291             }
292             
293             if (found === "") {
294                 return false;
295             }
296             //this.line++;
297             if (this.collapseWhite) {
298                 found = "\n";
299             }
300              if (this.keepWhite) {
301                 var last = tokens ? tokens.pop() : false;
302                 if (last && last.name != "WHIT") {
303                     tokens.push(last);
304                 }
305                 
306                 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
307             }
308             return true;
309         },
310
311         /**
312             @returns {Boolean} Was the token found?
313          */
314         read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
315             if (stream.look() == "/" && stream.look(1) == "*") {
316                 var found = stream.next(2);
317                 var c = '';
318                 var line = this.line;
319                 while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
320                     c = stream.next();
321                     if (c == "\n") this.line++;
322                     found += c;
323                 }
324                 
325                 // to start doclet we allow /** or /*** but not /**/ or /****
326                 if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
327                 else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
328                 return true;
329             }
330             return false;
331         },
332
333         /**
334             @returns {Boolean} Was the token found?
335          */
336         read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
337             var found;
338             if (
339                 (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
340                 || 
341                 (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
342             ) {
343                 var line = this.line;
344                 while (!stream.look().eof && !Lang.isNewline(stream.look())) {
345                     found += stream.next();
346                 }
347                 if (!stream.look().eof) {
348                     found += stream.next();
349                 }
350                 if (this.keepComments) {
351                     tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
352                 }
353                 this.line++;
354                 return true;
355             }
356             return false;
357         },
358
359         /**
360             @returns {Boolean} Was the token found?
361          */
362         read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
363             if (stream.look() == "\"") {
364                 // find terminator
365                 var string = stream.next();
366                 
367                 while (!stream.look().eof) {
368                     if (stream.look() == "\\") {
369                         if (Lang.isNewline(stream.look(1))) {
370                             do {
371                                 stream.next();
372                             } while (!stream.look().eof && Lang.isNewline(stream.look()));
373                             string += "\\\n";
374                         }
375                         else {
376                             string += stream.next(2);
377                         }
378                     }
379                     else if (stream.look() == "\"") {
380                         string += stream.next();
381                         tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
382                         return true;
383                     }
384                     else {
385                         string += stream.next();
386                     }
387                 }
388             }
389             return false; // error! unterminated string
390         },
391
392         /**
393             @returns {Boolean} Was the token found?
394          */
395         read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
396             if (stream.look() == "'") {
397                 // find terminator
398                 var string = stream.next();
399                 
400                 while (!stream.look().eof) {
401                     if (stream.look() == "\\") { // escape sequence
402                         string += stream.next(2);
403                     }
404                     else if (stream.look() == "'") {
405                         string += stream.next();
406                         tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
407                         return true;
408                     }
409                     else {
410                         string += stream.next();
411                     }
412                 }
413             }
414             return false; // error! unterminated string
415         },
416
417         /**
418             @returns {Boolean} Was the token found?
419          */
420         read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
421             if (stream.look() === "0" && stream.look(1) == "x") {
422                 return this.read_hex(stream, tokens);
423             }
424             
425             var found = "";
426             
427             while (!stream.look().eof && Lang.isNumber(found+stream.look())){
428                 found += stream.next();
429             }
430             
431             if (found === "") {
432                 return false;
433             }
434             else {
435                 if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
436                 else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
437                 return true;
438             }
439         },
440         /*t:
441             requires("../lib/JSDOC/TextStream.js");
442             requires("../lib/JSDOC/Token.js");
443             requires("../lib/JSDOC/Lang.js");
444             
445             plan(3, "testing read_numb");
446             
447             //// setup
448             var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
449             var tr = new TokenReader();
450             var tokens = tr.tokenize(new TextStream(src));
451             
452             var hexToken, octToken, decToken;
453             for (var i = 0; i < tokens.length; i++) {
454                 if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
455                 if (tokens[i].name == "OCTAL") octToken = tokens[i];
456                 if (tokens[i].name == "DECIMAL") decToken = tokens[i];
457             }
458             ////
459             
460             is(decToken.data, "8.0", "decimal number is found in source.");
461             is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
462             is(octToken.data, "0777", "octal number is found in source.");
463         */
464
465         /**
466             @returns {Boolean} Was the token found?
467          */
468         read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
469             var found = stream.next(2);
470             
471             while (!stream.look().eof) {
472                 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
473                     tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
474                     return true;
475                 }
476                 else {
477                     found += stream.next();
478                 }
479             }
480             return false;
481         },
482
483         /**
484             @returns {Boolean} Was the token found?
485          */
486         read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
487             var last;
488             if (
489                 stream.look() == "/"
490                 && 
491                 (
492                     
493                     (
494                         !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
495                         || 
496                         (
497                                !last.is("NUMB")
498                             && !last.is("NAME")
499                             && !last.is("RIGHT_PAREN")
500                             && !last.is("RIGHT_BRACKET")
501                         )
502                     )
503                 )
504             ) {
505                 var regex = stream.next();
506                 
507                 while (!stream.look().eof) {
508                     if (stream.look() == "\\") { // escape sequence
509                         regex += stream.next(2);
510                     }
511                     else if (stream.look() == "/") {
512                         regex += stream.next();
513                         
514                         while (/[gmi]/.test(stream.look())) {
515                             regex += stream.next();
516                         }
517                         
518                         tokens.push(new Token(regex, "REGX", "REGX", this.line));
519                         return true;
520                     }
521                     else {
522                         regex += stream.next();
523                     }
524                 }
525                 // error: unterminated regex
526             }
527             return false;
528         }
529 });